scala.util.control.NonFatal Scala Examples

The following examples show how to use scala.util.control.NonFatal. Each example is taken from an open-source project; the source file, project, and license are noted in the heading above it.
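For orientation, here is a minimal sketch of what NonFatal does (illustrative code, not taken from any of the projects below; the object and method names are made up). NonFatal is an extractor that matches ordinary exceptions while letting genuinely fatal throwables (VirtualMachineError such as OutOfMemoryError, ThreadDeath, InterruptedException, LinkageError, and ControlThrowable) propagate, which makes it the safe default in catch blocks and Future recovery.

import scala.util.control.NonFatal

object NonFatalSketch {
  // Returns None on ordinary failures such as NumberFormatException,
  // while fatal errors (e.g. OutOfMemoryError) are rethrown.
  def parsePort(s: String): Option[Int] =
    try Some(s.trim.toInt)
    catch { case NonFatal(_) => None }

  def main(args: Array[String]): Unit = {
    println(parsePort("8080")) // Some(8080)
    println(parsePort("oops")) // None
  }
}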
Example 1
Source File: NatchezHttp4sModule.scala    From skunk   with MIT License
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package natchez.http4s

import cats.~>
import cats.data.{ Kleisli, OptionT }
import cats.effect.Bracket
import cats.implicits._
import natchez.{ EntryPoint, Kernel, Span }
import org.http4s.HttpRoutes
import natchez.Trace
import natchez.Tags
import scala.util.control.NonFatal
import org.http4s.Response
import cats.effect.Resource
import cats.Defer
import natchez.TraceValue
import cats.Monad

object implicits {

  // Given an entry point and HTTP Routes in Kleisli[F, Span[F], ?] return routes in F. A new span
  // is created with the URI path as the name, either as a continuation of the incoming trace, if
  // any, or as a new root. This can likely be simplified, I just did what the types were saying
  // and it works so :shrug:
  private def liftT[F[_]: Bracket[?[_], Throwable]](
    entryPoint: EntryPoint[F])(
    routes:     HttpRoutes[Kleisli[F, Span[F], ?]]
  ): HttpRoutes[F] =
    Kleisli { req =>
      type G[A]  = Kleisli[F, Span[F], A]
      val lift   = λ[F ~> G](fa => Kleisli(_ => fa))
      val kernel = Kernel(req.headers.toList.map(h => (h.name.value -> h.value)).toMap)
      val spanR  = entryPoint.continueOrElseRoot(req.uri.path, kernel)
      OptionT {
        spanR.use { span =>
          val lower = λ[G ~> F](_(span))
          routes.run(req.mapK(lift)).mapK(lower).map(_.mapK(lower)).value
        }
      }
    }

  implicit class EntryPointOps[F[_]](self: EntryPoint[F]) {

    private def dummySpan(
      implicit ev: Monad[F]
    ): Span[F] =
      new Span[F] {
        val kernel: F[Kernel] = Kernel(Map.empty).pure[F]
        def put(fields: (String, TraceValue)*): F[Unit] = Monad[F].unit
        def span(name: String): Resource[F, Span[F]] = Monad[Resource[F, ?]].pure(this)
      }

    def liftT(routes: HttpRoutes[Kleisli[F, Span[F], ?]])(
      implicit ev: Bracket[F, Throwable]
    ): HttpRoutes[F] =
      implicits.liftT(self)(routes)
  }

  def natchezMiddleware[F[_]: Bracket[?[_], Throwable]: Trace](routes: HttpRoutes[F]): HttpRoutes[F] =
    Kleisli { req =>

      val addRequestFields: F[Unit] =
        Trace[F].put(
          Tags.http.method(req.method.name),
          Tags.http.url(req.uri.renderString)
        )

      def addResponseFields(res: Response[F]): F[Unit] =
        Trace[F].put(
          Tags.http.status_code(res.status.code.toString)
        )

      def addErrorFields(e: Throwable): F[Unit] =
        Trace[F].put(
          Tags.error(true),
          "error.message"    -> e.getMessage,
          "error.stacktrace" -> e.getStackTrace.mkString("\n"),
        )

      OptionT {
        routes(req).onError {
          case NonFatal(e)   => OptionT.liftF(addRequestFields *> addErrorFields(e))
        } .value.flatMap {
          case Some(handler) => addRequestFields *> addResponseFields(handler).as(handler.some)
          case None          => Option.empty[Response[F]].pure[F]
        }
      }
    }

} 
Example 2
Source File: Enigma.scala    From matcher   with MIT License
package com.wavesplatform.dex.crypto

import java.nio.charset.StandardCharsets
import java.security.NoSuchAlgorithmException
import java.security.spec.InvalidKeySpecException

import javax.crypto.spec.{PBEKeySpec, SecretKeySpec}
import javax.crypto.{Cipher, SecretKeyFactory}

import scala.util.control.NonFatal

object Enigma {

  private[this] val KeySalt           = "0495c728-1614-41f6-8ac3-966c22b4a62d".getBytes(StandardCharsets.UTF_8)
  private[this] val AES               = "AES"
  private[this] val Algorithm         = AES + "/ECB/PKCS5Padding"
  private[this] val HashingIterations = 999999
  private[this] val KeySizeBits       = 128

  def hashPassword(password: Array[Char],
                   salt: Array[Byte],
                   iterations: Int = HashingIterations,
                   keyLength: Int = KeySizeBits,
                   hashingAlgorithm: String = "PBKDF2WithHmacSHA512"): Array[Byte] =
    try {
      val keyFactory = SecretKeyFactory.getInstance(hashingAlgorithm)
      val keySpec    = new PBEKeySpec(password, salt, iterations, keyLength)
      val key        = keyFactory.generateSecret(keySpec)
      key.getEncoded
    } catch {
      case e @ (_: NoSuchAlgorithmException | _: InvalidKeySpecException) => throw new RuntimeException("Password hashing error", e)
    }

  def prepareDefaultKey(password: String): SecretKeySpec = new SecretKeySpec(hashPassword(password.toCharArray, KeySalt), AES)

  def encrypt(key: SecretKeySpec, bytes: Array[Byte]): Array[Byte] =
    try {
      val cipher = Cipher.getInstance(Algorithm)
      cipher.init(Cipher.ENCRYPT_MODE, key)
      cipher.doFinal(bytes)
    } catch {
      case NonFatal(e) => throw new RuntimeException("Encrypt error", e)
    }

  def decrypt(key: SecretKeySpec, encryptedBytes: Array[Byte]): Array[Byte] =
    try {
      val cipher: Cipher = Cipher.getInstance(Algorithm)
      cipher.init(Cipher.DECRYPT_MODE, key)
      cipher.doFinal(encryptedBytes)
    } catch {
      case NonFatal(e) => throw new RuntimeException("Decrypt error", e)
    }
} 
Example 3
Source File: Batcher.scala    From gfc-concurrent   with Apache License 2.0
package com.gilt.gfc.concurrent

import java.util.concurrent.{Executors, ScheduledExecutorService => JScheduledExecutorService}
import java.util.concurrent.atomic.AtomicReference

import com.gilt.gfc.logging.Loggable

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.util.control.NonFatal



  // Note: this is an excerpt; the enclosing batcher class, which defines
  // currentBatch, emptyBatch, task, isRunning, lastSubmit, name and
  // submitBatch, is not shown, so the snippet does not compile on its own.
  @tailrec
  override
  def flush(): Unit = {
    val b@(_, records) = currentBatch.get()
    if (currentBatch.compareAndSet(b, emptyBatch)) {
      safeSubmitBatch(records)
    } else {
      flush() // retry
    }
  }

  override
  def shutdown(): Unit = {
    isRunning = false
    task.cancel(true)
    flush()
  }

  private[this]
  def safeSubmitBatch(records: Vector[R]): Unit = {
    if (!records.isEmpty) {
      lastSubmit = System.currentTimeMillis()
      try {
        submitBatch(records)
      } catch {
        case NonFatal(e) =>
          error(s"Failed to flush ${name} batch: ${e.getMessage}", e)
      }
    }
  }
} 
Example 4
Source File: TaxCalculationService.scala    From pertax-frontend   with Apache License 2.0
package services

import com.kenshoo.play.metrics.Metrics
import com.google.inject.{Inject, Singleton}
import metrics._
import models.{TaxCalculation, TaxYearReconciliation}
import play.api.Mode.Mode
import play.api.http.Status._
import play.api.{Configuration, Environment, Logger}
import services.http.SimpleHttp
import uk.gov.hmrc.domain.Nino
import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse}
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
import uk.gov.hmrc.play.bootstrap.http.HttpClient

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

sealed trait TaxCalculationResponse
case class TaxCalculationSuccessResponse(taxCalculation: TaxCalculation) extends TaxCalculationResponse
case object TaxCalculationNotFoundResponse extends TaxCalculationResponse
case class TaxCalculationUnexpectedResponse(r: HttpResponse) extends TaxCalculationResponse
case class TaxCalculationErrorResponse(cause: Exception) extends TaxCalculationResponse
@Singleton
class TaxCalculationService @Inject()(
  environment: Environment,
  configuration: Configuration,
  val simpleHttp: SimpleHttp,
  val metrics: Metrics,
  val http: HttpClient,
  servicesConfig: ServicesConfig)(implicit ec: ExecutionContext)
    extends HasMetrics {
  val mode: Mode = environment.mode
  val runModeConfiguration: Configuration = configuration
  lazy val taxCalcUrl = servicesConfig.baseUrl("taxcalc")

  
  def getTaxCalculation(nino: Nino, year: Int)(implicit hc: HeaderCarrier): Future[TaxCalculationResponse] =
    withMetricsTimer("get-taxcalc-summary") { t =>
      simpleHttp.get[TaxCalculationResponse](s"$taxCalcUrl/taxcalc/$nino/taxSummary/$year")(
        onComplete = {

          case r if r.status >= 200 && r.status < 300 =>
            Logger.debug(r.body)
            t.completeTimerAndIncrementSuccessCounter()
            TaxCalculationSuccessResponse(r.json.as[TaxCalculation])

          case r if r.status == NOT_FOUND =>
            Logger.debug(r.body)
            t.completeTimerAndIncrementSuccessCounter()
            TaxCalculationNotFoundResponse

          case r =>
            Logger.debug(r.body)
            t.completeTimerAndIncrementFailedCounter()
            Logger.debug(s"Unexpected ${r.status} response getting tax calculation from tax-calculation-service")
            TaxCalculationUnexpectedResponse(r)
        },
        onError = { e =>
          Logger.debug(e.toString)
          t.completeTimerAndIncrementFailedCounter()
          Logger.warn("Error getting tax calculation from tax-calculation-service", e)
          TaxCalculationErrorResponse(e)
        }
      )
    }

  def getTaxYearReconciliations(nino: Nino)(
    implicit headerCarrier: HeaderCarrier): Future[List[TaxYearReconciliation]] =
    http
      .GET[List[TaxYearReconciliation]](s"$taxCalcUrl/taxcalc/$nino/reconciliations")
      .recover {
        case NonFatal(e) =>
          Logger.debug(s"An exception was thrown by taxcalc reconciliations: ${e.getMessage}")
          Nil
      }
} 
Example 5
Source File: ExpiringStreamServiceCallAuthTests.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.auth

import java.time.Duration

import com.daml.grpc.{GrpcException, GrpcStatus}
import com.daml.platform.sandbox.services.SubmitAndWaitDummyCommand
import com.daml.platform.testing.StreamConsumer
import com.daml.timer.Delayed
import io.grpc.Status
import io.grpc.stub.StreamObserver

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal

trait ExpiringStreamServiceCallAuthTests[T]
    extends ReadOnlyServiceCallAuthTests
    with SubmitAndWaitDummyCommand {

  protected def stream: Option[String] => StreamObserver[T] => Unit

  private def expectExpiration(token: String): Future[Unit] = {
    val promise = Promise[Unit]()
    stream(Option(token))(new StreamObserver[T] {
      @volatile private[this] var gotSomething = false
      def onNext(value: T): Unit = {
        gotSomething = true
      }
      def onError(t: Throwable): Unit = {
        t match {
          case GrpcException(GrpcStatus(Status.Code.PERMISSION_DENIED, _), _) if gotSomething =>
            val _ = promise.trySuccess(())
          case NonFatal(e) =>
            val _ = promise.tryFailure(e)
        }
      }
      def onCompleted(): Unit = {
        val _ = promise.tryFailure(new RuntimeException("stream completed before token expiration"))
      }
    })
    promise.future
  }

  private def canActAsMainActorExpiresInFiveSeconds =
    toHeader(expiringIn(Duration.ofSeconds(5), readWriteToken(mainActor)))

  private def canReadAsMainActorExpiresInFiveSeconds =
    toHeader(expiringIn(Duration.ofSeconds(5), readOnlyToken(mainActor)))

  it should "break a stream in flight upon read-only token expiration" in {
    val _ = Delayed.Future.by(10.seconds)(submitAndWait())
    expectExpiration(canReadAsMainActorExpiresInFiveSeconds).map(_ => succeed)
  }

  it should "break a stream in flight upon read/write token expiration" in {
    val _ = Delayed.Future.by(10.seconds)(submitAndWait())
    expectExpiration(canActAsMainActorExpiresInFiveSeconds).map(_ => succeed)
  }

  override def serviceCallWithToken(token: Option[String]): Future[Any] =
    submitAndWait().flatMap(_ => new StreamConsumer[T](stream(token)).first())

} 
Example 6
Source File: Assertions.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure

import ai.x.diff.DiffShow
import com.daml.grpc.{GrpcException, GrpcStatus}
import java.util.regex.Pattern
import io.grpc.Status

import scala.language.higherKinds
import scala.util.control.NonFatal

object Assertions extends DiffExtensions {
  def fail(message: String): Nothing =
    throw new AssertionError(message)

  def fail(message: String, cause: Throwable): Nothing =
    throw new AssertionError(message, cause)

  def assertLength[A, F[_] <: Seq[_]](context: String, length: Int, as: F[A]): F[A] = {
    assert(as.length == length, s"$context: expected $length item(s), got ${as.length}")
    as
  }

  def assertSingleton[A](context: String, as: Seq[A]): A =
    assertLength(context, 1, as).head

  def assertEquals[T: DiffShow](context: String, actual: T, expected: T): Unit = {
    val diff = DiffShow.diff(actual, expected)
    if (!diff.isIdentical)
      throw AssertionErrorWithPreformattedMessage(
        diff.string,
        s"$context: two objects are supposed to be equal but they are not",
      )
  }

  
  // Delegates to an overload (defined in the full source file but not shown
  // here) that takes an Option[Pattern] and asserts on the gRPC status code
  // and message.
  def assertGrpcError(t: Throwable, expectedCode: Status.Code, pattern: String): Unit = {
    assertGrpcError(
      t,
      expectedCode,
      if (pattern.isEmpty) None else Some(Pattern.compile(Pattern.quote(pattern))))
  }
} 
Example 7
Source File: AuthorizationTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import java.nio.file.Files

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.auth.TokenHolder
import com.daml.bazeltools.BazelRunfiles.rlocation
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.http.util.TestUtil.requiredFile
import com.daml.ledger.api.auth.{AuthServiceStatic, Claim, ClaimPublic, Claims}
import com.daml.ledger.client.LedgerClient
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

final class AuthorizationTest extends AsyncFlatSpec with BeforeAndAfterAll with Matchers {

  private val dar = requiredFile(rlocation("docs/quickstart-model.dar"))
    .fold(e => throw new IllegalStateException(e), identity)

  private val testId: String = this.getClass.getSimpleName

  implicit val asys: ActorSystem = ActorSystem(testId)
  implicit val mat: Materializer = Materializer(asys)
  implicit val aesf: ExecutionSequencerFactory = new AkkaExecutionSequencerPool(testId)(asys)
  implicit val ec: ExecutionContext = asys.dispatcher

  private val publicToken = "public"
  private val emptyToken = "empty"
  private val mockedAuthService = Option(AuthServiceStatic {
    case `publicToken` => Claims(Seq[Claim](ClaimPublic))
    case `emptyToken` => Claims(Nil)
  })

  private val accessTokenFile = Files.createTempFile("Extractor", "AuthSpec")
  private val tokenHolder = Option(new TokenHolder(accessTokenFile))

  private def setToken(string: String): Unit = {
    val _ = Files.write(accessTokenFile, string.getBytes())
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    try {
      Files.delete(accessTokenFile)
    } catch {
      case NonFatal(e) =>
        LoggerFactory
          .getLogger(classOf[AuthorizationTest])
          .warn("Unable to delete temporary token file", e)
    }
  }

  protected def withLedger[A] =
    HttpServiceTestFixture
      .withLedger[A](List(dar), testId, Option(publicToken), mockedAuthService) _

  private def packageService(client: LedgerClient): PackageService =
    new PackageService(HttpService.loadPackageStoreUpdates(client.packageClient, tokenHolder))

  behavior of "PackageService against an authenticated sandbox"

  it should "fail immediately if the authorization is insufficient" in withLedger { client =>
    setToken(emptyToken)
    packageService(client).reload.failed.map(_ => succeed)
  }

  it should "succeed if the authorization is sufficient" in withLedger { client =>
    setToken(publicToken)
    packageService(client).reload.map(_ => succeed)
  }

} 
Example 8
Source File: MigrationStep.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml

import java.nio.file.Path

import akka.actor.ActorSystem
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.lf.archive.DarReader
import scalaz.syntax.traverse._

import scala.concurrent.{ExecutionContext, Future}
import akka.stream.Materializer

import scala.util.control.NonFatal

object MigrationStep {

  trait Test {
    def execute(packageId: String, config: Config.Test)(
        implicit ec: ExecutionContext,
        esf: ExecutionSequencerFactory,
        mat: Materializer,
    ): Future[Unit]
  }

  private def readPackageId(path: Path): String =
    DarReader().readArchiveFromFile(path.toFile).get.map(_._1.toString).main

  def main(args: Array[String]): Unit = {
    val config = Config.parser.parse(args, Config.default).getOrElse(sys.exit(1))
    val packageId = readPackageId(config.dar)

    implicit val system: ActorSystem = ActorSystem(packageId)
    implicit val sequencer: ExecutionSequencerFactory =
      new AkkaExecutionSequencerPool(packageId)(system)
    implicit val ec: ExecutionContext = system.dispatcher

    val result = config.test.execute(packageId, config)

    result.failed.foreach { case NonFatal(e) => e.printStackTrace(System.err) }
    result.onComplete(_ => system.terminate())
  }
} 
Example 9
Source File: MultiFixtureBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.daml.dec.DirectExecutionContext
import org.scalatest._
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.time.Span

import scala.collection.immutable.Iterable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.util.control.{NoStackTrace, NonFatal}

trait MultiFixtureBase[FixtureId, TestContext]
    extends Assertions
    with BeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {
  self: AsyncTestSuite =>

  private var es: ScheduledExecutorService = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    es = Executors.newScheduledThreadPool(1)
  }

  override protected def afterAll(): Unit = {
    es.shutdownNow()
    super.afterAll()
  }

  protected class TestFixture(val id: FixtureId, createContext: () => TestContext) {
    def context(): TestContext = createContext()
  }

  def timeLimit: Span = scaled(30.seconds)

  object TestFixture {
    def apply(id: FixtureId, createContext: () => TestContext): TestFixture =
      new TestFixture(id, createContext)

    def unapply(testFixture: TestFixture): Option[(FixtureId, TestContext)] =
      Some((testFixture.id, testFixture.context()))
  }

  protected def fixtures: Iterable[TestFixture]

  
  // Helpers referenced below (runTestAgainstFixture, foldAssertions,
  // parallelExecution) are defined in the full source file but omitted here.
  protected def allFixtures(runTest: TestContext => Future[Assertion]): Future[Assertion] =
    forAllFixtures(fixture => runTest(fixture.context))

  protected def forAllFixtures(runTest: TestFixture => Future[Assertion]): Future[Assertion] = {
    forAllMatchingFixtures { case f => runTest(f) }
  }

  protected def forAllMatchingFixtures(
      runTest: PartialFunction[TestFixture, Future[Assertion]]): Future[Assertion] = {
    if (parallelExecution) {
      val results = fixtures.map(
        fixture =>
          if (runTest.isDefinedAt(fixture))
            runTestAgainstFixture(fixture, runTest)
          else
            Future.successful(succeed))
      Future.sequence(results).map(foldAssertions)
    } else {
      fixtures.foldLeft(Future.successful(succeed)) {
        case (resultSoFar, thisFixture) =>
          resultSoFar.flatMap {
            case Succeeded => runTestAgainstFixture(thisFixture, runTest)
            case other => Future.successful(other)
          }
      }
    }
  }

} 
Example 10
Source File: IsStatusException.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import com.daml.grpc.{GrpcException, GrpcStatus}
import io.grpc.Status
import org.scalatest.{Assertion, Matchers}

import scala.util.control.NonFatal

object IsStatusException extends Matchers {

  def apply(expectedStatusCode: Status.Code)(throwable: Throwable): Assertion = {
    throwable match {
      case GrpcException(GrpcStatus(code, _), _) => code shouldEqual expectedStatusCode
      case NonFatal(other) => fail(s"$other is not a gRPC Status exception.")
    }
  }

  def apply(expectedStatus: Status): Throwable => Assertion = {
    apply(expectedStatus.getCode)
  }
} 
Example 11
Source File: AkkaExecutionSequencer.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import com.daml.dec.DirectExecutionContext


// The class declaration was dropped from this excerpt; it is reconstructed
// here from the companion object below. Other members are omitted.
class AkkaExecutionSequencer(actorRef: ActorRef)(implicit terminationTimeout: Timeout) {

  def closeAsync(implicit ec: ExecutionContext): Future[Done] =
    (actorRef ? ShutdownRequest).mapTo[Done].recover {
      case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) =>
        Done
    }

  private def actorIsTerminated(askTimeoutException: AskTimeoutException) = {
    AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty
  }
}

object AkkaExecutionSequencer {
  def apply(name: String, terminationTimeout: FiniteDuration)(
      implicit system: ActorSystem): AkkaExecutionSequencer = {
    system match {
      case extendedSystem: ExtendedActorSystem =>
        new AkkaExecutionSequencer(
          extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
      case _ =>
        new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))

    }
  }

  private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r
}

private[grpc] class RunnableSequencingActor extends Actor with ActorLogging {
  @SuppressWarnings(Array("org.wartremover.warts.Any"))
  override val receive: Receive = {
    case runnable: Runnable =>
      try {
        runnable.run()
      } catch {
        case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t)
      }
    case ShutdownRequest =>
      context.stop(self) // processing of the current message will continue
      sender() ! Done
  }
}

private[grpc] object RunnableSequencingActor {
  case object ShutdownRequest
} 
Example 12
Source File: Main.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.codegen

import com.daml.lf.codegen.conf.Conf
import com.typesafe.scalalogging.StrictLogging

import scala.util.control.NonFatal

object StandaloneMain extends StrictLogging {

  @deprecated("Use codegen font-end: com.daml.codegen.CodegenMain.main", "0.13.23")
  def main(args: Array[String]): Unit =
    try {
      Main.main(args)
    } catch {
      case NonFatal(t) =>
        logger.error(s"Error generating code: {}", t.getMessage)
        sys.exit(-1)
    }
}

object Main {
  @deprecated("Use codegen font-end: com.daml.codegen.CodegenMain.main", "0.13.23")
  def main(args: Array[String]): Unit =
    Conf.parse(args) match {
      case Some(conf) => CodeGenRunner.run(conf)
      case None =>
        throw new IllegalArgumentException(s"Invalid command line arguments: ${args.mkString(" ")}")
    }
} 
Example 13
Source File: ResourceManagement.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.codegen

import scala.util.control.NonFatal

private[codegen] object ResourceManagement {

  def withResources[T <: AutoCloseable, V](r: => T)(f: T => V): V = {
    var exception: Throwable = null
    try {
      f(r)
    } catch {
      case NonFatal(e) =>
        exception = e
        throw e
    } finally {
      closeAndAddSuppressed(exception, r)
    }
  }

  private def closeAndAddSuppressed(e: Throwable, resource: AutoCloseable): Unit = {
    if (e != null) {
      try {
        resource.close()
      } catch {
        case NonFatal(suppressed) =>
          e.addSuppressed(suppressed)
      }
    } else {
      resource.close()
    }
  }
} 
Example 14
Source File: AkkaTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 15
Source File: RetryHelper.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.retrying

import java.lang.Math.floor

import akka.actor.Scheduler
import akka.pattern.after
import com.daml.ledger.client.binding.config.IRetryConfig
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object RetryHelper extends LazyLogging {

  
  // A RetryStrategy decides whether a given failure should be retried.
  type RetryStrategy = PartialFunction[Throwable, Boolean]

  val always: RetryStrategy = {
    case NonFatal(_) => true
  }

  def retry[T](retryConfig: Option[(Scheduler, IRetryConfig)])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        implicit val scheduler: Scheduler = rc._1
        retry(Option(rc._2))(retryStrategy)(f)
    }
  }

  def retry[T](retryConfig: Option[IRetryConfig])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        val maxAttempts = floor(rc.timeout / rc.interval).toInt
        retry(maxAttempts, rc.interval)(retryStrategy)(f)
    }
  }

  def retry[T](maxAttempts: Int, delay: FiniteDuration)(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {

    def shouldRetry(n: Int, e: Throwable): Boolean =
      n > 0 && retryStrategy.applyOrElse(e, (_: Throwable) => false)

    val remainingAttempts = maxAttempts - 1 // the next line will trigger a future evaluation

    f.recoverWith {
      case NonFatal(e) if shouldRetry(remainingAttempts, e) =>
        logWarning(remainingAttempts, e)
        after(delay, s)(retry(remainingAttempts, delay)(retryStrategy)(f))
    }
  }

  private def logWarning(remainingAttempts: Int, e: Throwable): Unit = {
    logger.warn(
      s"Retrying after failure. Attempts remaining: $remainingAttempts. Error: ${e.getMessage}")
  }
} 
Example 16
Source File: RetryHelper.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.util

import java.lang.Math.floor

import akka.actor.Scheduler
import akka.pattern.after
import com.daml.grpc.GrpcException
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal


object RetryHelper extends LazyLogging {

  // A RetryStrategy decides whether a given failure should be retried;
  // IRetryConfig is defined elsewhere in the project.
  type RetryStrategy = PartialFunction[Throwable, Boolean]

  val always: RetryStrategy = {
    case NonFatal(_) => true
  }

  val failFastOnPermissionDenied: RetryStrategy = {
    case GrpcException.PERMISSION_DENIED() => false
    case NonFatal(_) => true
  }

  def retry[T](retryConfig: Option[(Scheduler, IRetryConfig)])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        implicit val scheduler: Scheduler = rc._1
        retry(Option(rc._2))(retryStrategy)(f)
    }
  }

  def retry[T](retryConfig: Option[IRetryConfig])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        val maxAttempts = floor(rc.timeout / rc.interval).toInt
        retry(maxAttempts, rc.interval)(retryStrategy)(f)
    }
  }

  def retry[T](maxAttempts: Int, delay: FiniteDuration)(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {

    def shouldRetry(n: Int, e: Throwable): Boolean =
      n > 0 && retryStrategy.applyOrElse(e, (_: Throwable) => false)

    val remainingAttempts = maxAttempts - 1 // the next line will trigger a future evaluation

    f.recoverWith {
      case NonFatal(e) if shouldRetry(remainingAttempts, e) =>
        logWarning(remainingAttempts, e)
        after(delay, s)(retry(remainingAttempts, delay)(retryStrategy)(f))
    }
  }

  private def logWarning(remainingAttempts: Int, e: Throwable): Unit = {
    logger.warn(
      s"Retrying after failure. Attempts remaining: $remainingAttempts. Error: ${e.getMessage}")
  }
} 
Example 17
Source File: ContextualizedLogger.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.logging

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.grpc.GrpcException
import io.grpc.Status
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.util.{Failure, Try}
import scala.util.control.NonFatal

object ContextualizedLogger {

  // Caches loggers to prevent them from needlessly wasting memory
  // Replicates the behavior of the underlying Slf4j logger factory
  private[this] val cache = TrieMap.empty[String, ContextualizedLogger]

  // Allows to explicitly pass a logger, should be used for testing only
  private[logging] def createFor(withoutContext: Logger): ContextualizedLogger =
    new ContextualizedLogger(withoutContext)

  // Slf4j handles the caching of the underlying logger itself
  private[logging] def createFor(name: String): ContextualizedLogger =
    createFor(LoggerFactory.getLogger(name))

  
  def get(clazz: Class[_]): ContextualizedLogger = {
    val name = clazz.getName.stripSuffix("$")
    cache.getOrElseUpdate(name, createFor(name))
  }

}

final class ContextualizedLogger private (val withoutContext: Logger) {

  val trace = new LeveledLogger.Trace(withoutContext)
  val debug = new LeveledLogger.Debug(withoutContext)
  val info = new LeveledLogger.Info(withoutContext)
  val warn = new LeveledLogger.Warn(withoutContext)
  val error = new LeveledLogger.Error(withoutContext)

  private def internalOrUnknown(code: Status.Code): Boolean =
    code == Status.Code.INTERNAL || code == Status.Code.UNKNOWN

  private def logError(t: Throwable)(implicit logCtx: LoggingContext): Unit =
    error("Unhandled internal error", t)

  def logErrorsOnCall[Out](implicit logCtx: LoggingContext): PartialFunction[Try[Out], Unit] = {
    case Failure(e @ GrpcException(s, _)) =>
      if (internalOrUnknown(s.getCode)) {
        logError(e)
      }
    case Failure(NonFatal(e)) =>
      logError(e)
  }

  def logErrorsOnStream[Out](implicit logCtx: LoggingContext): Flow[Out, Out, NotUsed] =
    Flow[Out].mapError {
      case e @ GrpcException(s, _) =>
        if (internalOrUnknown(s.getCode)) {
          logError(e)
        }
        e
      case NonFatal(e) =>
        logError(e)
        e
    }

} 
Example 18
Source File: RetryStrategy.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.timer

import scala.concurrent.duration.{Duration, DurationInt}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object RetryStrategy {

  
  def constant(attempts: Option[Int] = None, waitTime: Duration)(
      predicate: PartialFunction[Throwable, Boolean]
  ): RetryStrategy =
    new RetryStrategy(attempts, waitTime, waitTime, identity, predicate)
}

final class RetryStrategy private (
    attempts: Option[Int],
    firstWaitTime: Duration,
    waitTimeCap: Duration,
    progression: Duration => Duration,
    predicate: PartialFunction[Throwable, Boolean]) {
  private def clip(t: Duration): Duration = t.min(waitTimeCap).max(0.millis)
  def apply[A](run: (Int, Duration) => Future[A])(implicit ec: ExecutionContext): Future[A] = {
    def go(attempt: Int, wait: Duration): Future[A] = {
      run(attempt, wait)
        .recoverWith {
          case NonFatal(throwable) if attempts.exists(attempt > _) =>
            Future.failed(throwable)
          case NonFatal(throwable) if predicate.lift(throwable).getOrElse(false) =>
            Delayed.Future.by(wait)(go(attempt + 1, clip(progression(wait))))
          case NonFatal(throwable) =>
            Future.failed(throwable)
        }
    }
    go(1, clip(firstWaitTime))
  }
} 
Example 19
Source File: Delayed.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.timer

import scala.concurrent.{ExecutionContext, Future => ScalaFuture}
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal

object Delayed {

  def by[T](t: Duration)(value: => T)(implicit ec: ExecutionContext): ScalaFuture[T] =
    Future.by(t)(ScalaFuture(value))

  object Future {
    def by[T](t: Duration)(value: => ScalaFuture[T])(
        implicit ec: ExecutionContext): ScalaFuture[T] =
      if (!t.isFinite) {
        ScalaFuture.failed(new IllegalArgumentException(s"A task cannot be postponed indefinitely"))
      } else if (t.length < 1) {
        try value
        catch { case NonFatal(e) => ScalaFuture.failed(e) }
      } else {
        val task = new PromiseTask(value)
        Timer.schedule(task, t.toMillis)
        task.future
      }
  }

} 
Example 20
Source File: ProgramResource.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.{Executors, TimeUnit}

import com.daml.logging.ContextualizedLogger
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.resources.ProgramResource._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Try
import scala.util.control.{NoStackTrace, NonFatal}

class ProgramResource[T](
    owner: => ResourceOwner[T],
    tearDownTimeout: FiniteDuration = 10.seconds,
) {
  private val logger = ContextualizedLogger.get(getClass)

  private val executorService = Executors.newCachedThreadPool()

  def run(): Unit = {
    newLoggingContext { implicit logCtx =>
      val resource = {
        implicit val executionContext: ExecutionContext =
          ExecutionContext.fromExecutor(executorService)
        Try(owner.acquire()).fold(Resource.failed, identity)
      }

      def stop(): Unit = {
        Await.result(resource.release(), tearDownTimeout)
        executorService.shutdown()
        executorService.awaitTermination(tearDownTimeout.toMillis, TimeUnit.MILLISECONDS)
        ()
      }

      sys.runtime.addShutdownHook(new Thread(() => {
        try {
          stop()
        } catch {
          case NonFatal(exception) =>
            logger.error("Failed to stop successfully.", exception)
        }
      }))

      // On failure, shut down immediately.
      resource.asFuture.failed.foreach { exception =>
        exception match {
          // The error is suppressed; we don't need to print anything more.
          case _: SuppressedStartupException =>
          case _: StartupException =>
            logger.error(
              s"Shutting down because of an initialization error.\n${exception.getMessage}")
          case NonFatal(_) =>
            logger.error("Shutting down because of an initialization error.", exception)
        }
        sys.exit(1) // `stop` will be triggered by the shutdown hook.
      }(ExecutionContext.global) // Run on the global execution context to avoid deadlock.
    }
  }
}

object ProgramResource {

  trait StartupException extends NoStackTrace {
    self: Exception =>
  }

  trait SuppressedStartupException {
    self: Exception =>
  }
} 
Example 21
Source File: ThreadUtil.scala    From iotchain   with MIT License
package jbok.common.thread

import java.lang.Thread.UncaughtExceptionHandler
import java.nio.channels.AsynchronousChannelGroup
import java.nio.channels.spi.AsynchronousChannelProvider
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

import cats.effect.{Resource, Sync}

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

object ThreadUtil {
  def named(threadPrefix: String, daemon: Boolean, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx                  = new AtomicInteger(0)
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(daemon)
        t.setName(s"$threadPrefix-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            ExecutionContext.defaultReporter(e)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case _           => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def blockingThreadPool[F[_]](name: String)(implicit F: Sync[F]): Resource[F, ExecutionContext] =
    Resource(F.delay {
      val factory  = named(name, daemon = true)
      val executor = Executors.newCachedThreadPool(factory)
      val ec       = ExecutionContext.fromExecutor(executor)
      (ec, F.delay(executor.shutdown()))
    })

  def acg[F[_]](implicit F: Sync[F]): Resource[F, AsynchronousChannelGroup] =
    Resource(F.delay {
      val acg = acgUnsafe
      (acg, F.delay(acg.shutdownNow()))
    })

  def acgUnsafe: AsynchronousChannelGroup =
    AsynchronousChannelProvider
      .provider()
      .openAsynchronousChannelGroup(8, named("jbok-ag-tcp", daemon = true))

  lazy val acgGlobal: AsynchronousChannelGroup = acgUnsafe
} 
Example 22
Source File: TestSpec.scala    From nanotest-strawman   with Apache License 2.0
package verify

import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.control.NonFatal
import scala.util.{ Failure, Success }
import verify.sourcecode.SourceLocation

case class TestSpec[I, +O](name: String, f: I => Future[Result[O]]) extends (I => Future[Result[O]]) {

  override def apply(v1: I): Future[Result[O]] = f(v1)
}

object TestSpec {
  def async[Env](name: String, cb: Env => Future[Unit])(implicit ec: ExecutionContext): TestSpec[Env, Unit] =
    TestSpec(
      name, { env =>
        val f: Future[Unit] =
          try cb(env)
          catch { case NonFatal(ex) => Future.failed(ex) }

        val p = Promise[Result[Unit]]()
        f.onComplete {
          case Success(_) =>
            p.success(Result.Success(()))
          case Failure(ex) =>
            p.success(Result.from(ex))
        }
        p.future
      }
    )

  def sync[Env](name: String, cb: Env => Void): TestSpec[Env, Unit] =
    TestSpec(
      name, { env =>
        try {
          cb(env) match {
            case Void.UnitRef =>
              Future.successful(Result.Success(()))
            case Void.Caught(ref, loc) =>
              Future.successful(unexpected(ref, loc))
          }
        } catch {
          case NonFatal(ex) =>
            Future.successful(Result.from(ex))
        }
      }
    )

  private def unexpected[A](ref: A, loc: SourceLocation): Result[Nothing] =
    Result.Failure(
      s"Problem with test spec, expecting `Unit`, but received: $ref ",
      None,
      Some(loc)
    )
} 
Example 23
Source File: Properties.scala    From nanotest-strawman   with Apache License 2.0
package verify

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NonFatal
import verify.Utils.silent

case class Properties[I](
    setup: () => I,
    tearDown: I => Void,
    setupSuite: () => Unit,
    tearDownSuite: () => Unit,
    properties: Seq[TestSpec[I, Unit]]
)(implicit ec: ExecutionContext)
    extends Iterable[TestSpec[Unit, Unit]] {

  def iterator: Iterator[TestSpec[Unit, Unit]] = {
    for (property <- properties.iterator)
      yield TestSpec[Unit, Unit](
        property.name, { _ =>
          try {
            val env = setup()
            val result = try property(env)
            catch {
              case NonFatal(ex) =>
                Future.successful(Result.from(ex))
            }

            result.flatMap {
              case Result.Success(_) =>
                TestSpec.sync(property.name, tearDown)(env)
              case error =>
                silent(tearDown(env))
                Future.successful(error)
            }
          } catch {
            case NonFatal(ex) =>
              Future.successful(Result.from(ex))
          }
        }
      )
  }
} 
Example 24
Source File: RouterMetrics.scala    From prometheus-akka   with Apache License 2.0
package com.workday.prometheus.akka

import scala.collection.concurrent.TrieMap
import scala.util.control.NonFatal

import org.slf4j.LoggerFactory

import io.prometheus.client.{Counter, Gauge}

object RouterMetrics {
  private val logger = LoggerFactory.getLogger(RouterMetrics.getClass)
  private val map = TrieMap[Entity, RouterMetrics]()
  def metricsFor(e: Entity): Option[RouterMetrics] = {
    try {
      Some(map.getOrElseUpdate(e, new RouterMetrics(e)))
    } catch {
      case NonFatal(t) => {
        logger.warn("Issue with getOrElseUpdate (failing over to simple get)", t)
        map.get(e)
      }
    }
  }
  def hasMetricsFor(e: Entity): Boolean = map.contains(e)
}

class RouterMetrics(entity: Entity) {
  val actorName = metricFriendlyActorName(entity.name)
  val routingTime = Gauge.build().name(s"akka_router_routing_time_$actorName").help("Akka Router routing time (Seconds)").register()
  val processingTime = Gauge.build().name(s"akka_router_processing_time_$actorName").help("Akka Router processing time (Seconds)").register()
  val timeInMailbox = Gauge.build().name(s"akka_router_time_in_mailbox_$actorName").help("Akka Router time in mailbox (Seconds)").register()
  val messages = Counter.build().name(s"akka_router_message_count_$actorName").help("Akka Router messages").register()
  val errors = Counter.build().name(s"akka_router_error_count_$actorName").help("Akka Router errors").register()
} 
Example 25
Source File: ActorMetrics.scala    From prometheus-akka   with Apache License 2.0
package com.workday.prometheus.akka

import scala.collection.concurrent.TrieMap
import scala.util.control.NonFatal

import org.slf4j.LoggerFactory

import io.prometheus.client.{Counter, Gauge}

object ActorMetrics {
  private val logger = LoggerFactory.getLogger(ActorMetrics.getClass)
  private val map = TrieMap[Entity, ActorMetrics]()
  def metricsFor(e: Entity): Option[ActorMetrics] = {
    try {
      Some(map.getOrElseUpdate(e, new ActorMetrics(e)))
    } catch {
      case NonFatal(t) => {
        logger.warn("Issue with getOrElseUpdate (failing over to simple get)", t)
        map.get(e)
      }
    }
  }
  def hasMetricsFor(e: Entity): Boolean = map.contains(e)
}

class ActorMetrics(entity: Entity) {
  val actorName = metricFriendlyActorName(entity.name)
  val mailboxSize = Gauge.build().name(s"akka_actor_mailbox_size_$actorName").help("Akka Actor mailbox size").register()
  val processingTime = Gauge.build().name(s"akka_actor_processing_time_$actorName").help("Akka Actor processing time (Seconds)").register()
  val timeInMailbox = Gauge.build().name(s"akka_actor_time_in_mailbox_$actorName").help("Akka Actor time in mailbox (Seconds)").register()
  val messages = Counter.build().name(s"akka_actor_message_count_$actorName").help("Akka Actor messages").register()
  val errors = Counter.build().name(s"akka_actor_error_count_$actorName").help("Akka Actor errors").register()
} 
Example 26
Source File: BlockingIO.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
} 
Example 27
Source File: MessageFlowTransformerUtil.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder.server.util

import akka.stream._
import akka.stream.scaladsl._
import com.trueaccord.scalapb.json.JsonFormat
import play.api.http.websocket._
import play.api.libs.streams._
import play.api.mvc.WebSocket.MessageFlowTransformer
import scala.util.control.NonFatal
import scala.util.Try
import walfie.gbf.raidfinder.protocol._

object MessageFlowTransformerUtil {
  private type ProtobufMessageFlowTransformer = MessageFlowTransformer[RequestMessage, BinaryProtobuf]

  // Throwing a WebSocketCloseException doesn't seem to actually propagate the
  // close reason to the client, despite what the ScalaDoc page says.
  // https://www.playframework.com/documentation/2.5.x/api/scala/index.html#play.api.http.websocket.WebSocketCloseException
  private def closeWebsocket(binary: Boolean): WebSocketCloseException = {
    val closeMessage = CloseMessage(Some(CloseCodes.InconsistentData), "Invalid input")
    WebSocketCloseException(closeMessage)
  }

  implicit val protobufJsonMessageFlowTransformer: ProtobufMessageFlowTransformer = {
    MessageFlowTransformer.stringMessageFlowTransformer.map(
      s => Try(JsonFormat.fromJsonString[RequestMessage](s))
        .getOrElse(throw closeWebsocket(binary = false)),
      binary => JsonFormat.toJsonString(
        ResponseMessage
          .validate(binary.value)
          .getOrElse(throw closeWebsocket(binary = false))
      )
    )
  }

  implicit val protobufBinaryMessageFlowTransformer: ProtobufMessageFlowTransformer = {
    MessageFlowTransformer.byteArrayMessageFlowTransformer.map(
      RequestMessage.validate(_).getOrElse(throw closeWebsocket(binary = true)),
      _.value
    )
  }
} 
Example 28
Source File: DevboxSetupMain.scala    From devbox   with Apache License 2.0
package devbox.agent

import java.io.ByteArrayOutputStream

import scala.util.control.NonFatal


object DevboxSetupMain {

  def main(args: Array[String]): Unit = {
    val baos = new ByteArrayOutputStream()
    os.Internals.transfer(System.in, baos)
    val buffer = baos.toByteArray
    val allSetupFilesAndCommands =
      upickle.default.readBinary[Seq[Either[(String, Array[Byte]), String]]](buffer)

    val userName = sys.env.getOrElse("DEVBOX_USER", os.proc("whoami").call().out.trim)

    allSetupFilesAndCommands.foreach{
      case Left((destination, bytes)) =>

        // we run as root, so we need to expand ~ to DEVBOX_USER here
        val expandedDestination = destination match{
          case s"~/$rest" => os.root / "home" / userName / os.SubPath(rest)
          case dest => os.Path(dest)
        }
        try {
          os.write.over(expandedDestination, bytes, createFolders = true)
          os.perms.set(expandedDestination, "rwxrwxrwx")
        } catch {
          case NonFatal(e) =>
            println(s"Error writing file $destination: ${e.getMessage}")
        }
      case Right(cmd) =>
        println("Running remote command: " + cmd)
        os.proc("bash", "-c", cmd).call()
    }
  }
} 
Example 29
Source File: JsonUtils.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.kafka010

import java.io.Writer

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


private object JsonUtils {
  // json4s requires an implicit Formats in scope for Serialization.write.
  private implicit val formats = Serialization.formats(NoTypeHints)

  // Other methods of this utility, including the parsers that use NonFatal,
  // are omitted from this excerpt.
  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    partitionOffsets.foreach { case (tp, off) =>
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
Example 30
Source File: CreateHiveTableAsSelectCommand.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.execution

import scala.util.control.NonFatal

import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.hive.MetastoreRelation



case class CreateHiveTableAsSelectCommand(
    tableDesc: CatalogTable,
    query: LogicalPlan,
    ignoreIfExists: Boolean)
  extends RunnableCommand {

  private val tableIdentifier = tableDesc.identifier

  override def innerChildren: Seq[LogicalPlan] = Seq(query)

  override def run(sparkSession: SparkSession): Seq[Row] = {
    lazy val metastoreRelation: MetastoreRelation = {
      import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
      import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
      import org.apache.hadoop.io.Text
      import org.apache.hadoop.mapred.TextInputFormat

      val withFormat =
        tableDesc.withNewStorage(
          inputFormat =
            tableDesc.storage.inputFormat.orElse(Some(classOf[TextInputFormat].getName)),
          outputFormat =
            tableDesc.storage.outputFormat
              .orElse(Some(classOf[HiveIgnoreKeyTextOutputFormat[Text, Text]].getName)),
          serde = tableDesc.storage.serde.orElse(Some(classOf[LazySimpleSerDe].getName)),
          compressed = tableDesc.storage.compressed)

      val withSchema = if (withFormat.schema.isEmpty) {
        // Hive doesn't support specifying the column list for target table in CTAS
        // However we don't think SparkSQL should follow that.
        tableDesc.copy(schema = query.output.toStructType)
      } else {
        withFormat
      }

      sparkSession.sessionState.catalog.createTable(withSchema, ignoreIfExists = false)

      // Get the Metastore Relation
      sparkSession.sessionState.catalog.lookupRelation(tableIdentifier) match {
        case r: MetastoreRelation => r
      }
    }
    // TODO ideally, we should get the output data ready first and then
    // add the relation into catalog, just in case of failure occurs while data
    // processing.
    if (sparkSession.sessionState.catalog.tableExists(tableIdentifier)) {
      if (ignoreIfExists) {
        // table already exists, will do nothing, to keep consistent with Hive
      } else {
        throw new AnalysisException(s"$tableIdentifier already exists.")
      }
    } else {
      try {
        sparkSession.sessionState.executePlan(InsertIntoTable(
          metastoreRelation, Map(), query, overwrite = true, ifNotExists = false)).toRdd
      } catch {
        case NonFatal(e) =>
          // drop the created table.
          sparkSession.sessionState.catalog.dropTable(tableIdentifier, ignoreIfNotExists = true,
            purge = false)
          throw e
      }
    }

    Seq.empty[Row]
  }

  override def argString: String = {
    s"[Database:${tableDesc.database}}, " +
    s"TableName: ${tableDesc.identifier.table}, " +
    s"InsertIntoHiveTable]"
  }
} 
Example 31
Source File: SQLBuilderTest.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.catalyst

import scala.util.control.NonFatal

import org.apache.spark.sql.{DataFrame, Dataset, QueryTest}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.hive.test.TestHiveSingleton


abstract class SQLBuilderTest extends QueryTest with TestHiveSingleton {
  protected def checkSQL(e: Expression, expectedSQL: String): Unit = {
    val actualSQL = e.sql
    try {
      assert(actualSQL === expectedSQL)
    } catch {
      case cause: Throwable =>
        fail(
          s"""Wrong SQL generated for the following expression:
             |
             |${e.prettyName}
             |
             |$cause
           """.stripMargin)
    }
  }

  protected def checkSQL(plan: LogicalPlan, expectedSQL: String): Unit = {
    val generatedSQL = try new SQLBuilder(plan).toSQL catch { case NonFatal(e) =>
      fail(
        s"""Cannot convert the following logical query plan to SQL:
           |
           |${plan.treeString}
         """.stripMargin)
    }

    try {
      assert(generatedSQL === expectedSQL)
    } catch {
      case cause: Throwable =>
        fail(
          s"""Wrong SQL generated for the following logical query plan:
             |
             |${plan.treeString}
             |
             |$cause
           """.stripMargin)
    }

    checkAnswer(spark.sql(generatedSQL), Dataset.ofRows(spark, plan))
  }

  protected def checkSQL(df: DataFrame, expectedSQL: String): Unit = {
    checkSQL(df.queryExecution.analyzed, expectedSQL)
  }
} 
Example 32
Source File: ResolveInlineTables.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.catalyst.analysis

import scala.util.control.NonFatal

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Cast
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types.{StructField, StructType}


// Analyzer rule that turns an inline table (a VALUES clause) into a
// LocalRelation once all of its expressions are resolved. (The enclosing
// object is restored here; the original rule also validates row dimensions
// and evaluability before converting.)
object ResolveInlineTables extends Rule[LogicalPlan] {

  override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case table: UnresolvedInlineTable if table.expressionsResolved => convert(table)
  }

  private[analysis] def convert(table: UnresolvedInlineTable): LocalRelation = {
    // For each column, traverse all the values and find a common data type and nullability.
    val fields = table.rows.transpose.zip(table.names).map { case (column, name) =>
      val inputTypes = column.map(_.dataType)
      val tpe = TypeCoercion.findWiderTypeWithoutStringPromotion(inputTypes).getOrElse {
        table.failAnalysis(s"incompatible types found in column $name for inline table")
      }
      StructField(name, tpe, nullable = column.exists(_.nullable))
    }
    val attributes = StructType(fields).toAttributes
    assert(fields.size == table.names.size)

    val newRows: Seq[InternalRow] = table.rows.map { row =>
      InternalRow.fromSeq(row.zipWithIndex.map { case (e, ci) =>
        val targetType = fields(ci).dataType
        try {
          if (e.dataType.sameType(targetType)) {
            e.eval()
          } else {
            Cast(e, targetType).eval()
          }
        } catch {
          case NonFatal(ex) =>
            table.failAnalysis(s"failed to evaluate expression ${e.sql}: ${ex.getMessage}")
        }
      })
    }

    LocalRelation(attributes, newRows)
  }
} 
Example 33
Source File: HBaseCredentialProvider.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.deploy.yarn.security

import scala.reflect.runtime.universe
import scala.util.control.NonFatal

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.Credentials
import org.apache.hadoop.security.token.{Token, TokenIdentifier}

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[security] class HBaseCredentialProvider extends ServiceCredentialProvider with Logging {

  override def serviceName: String = "hbase"

  override def obtainCredentials(
      hadoopConf: Configuration,
      sparkConf: SparkConf,
      creds: Credentials): Option[Long] = {
    try {
      val mirror = universe.runtimeMirror(getClass.getClassLoader)
      val obtainToken = mirror.classLoader.
        loadClass("org.apache.hadoop.hbase.security.token.TokenUtil").
        getMethod("obtainToken", classOf[Configuration])

      logDebug("Attempting to fetch HBase security token.")
      val token = obtainToken.invoke(null, hbaseConf(hadoopConf))
        .asInstanceOf[Token[_ <: TokenIdentifier]]
      logInfo(s"Get token from HBase: ${token.toString}")
      creds.addToken(token.getService, token)
    } catch {
      case NonFatal(e) =>
        logDebug(s"Failed to get token from service $serviceName", e)
    }

    None
  }

  override def credentialsRequired(hadoopConf: Configuration): Boolean = {
    hbaseConf(hadoopConf).get("hbase.security.authentication") == "kerberos"
  }

  private def hbaseConf(conf: Configuration): Configuration = {
    try {
      val mirror = universe.runtimeMirror(getClass.getClassLoader)
      val confCreate = mirror.classLoader.
        loadClass("org.apache.hadoop.hbase.HBaseConfiguration").
        getMethod("create", classOf[Configuration])
      confCreate.invoke(null, conf).asInstanceOf[Configuration]
    } catch {
      case NonFatal(e) =>
        logDebug("Fail to invoke HBaseConfiguration", e)
        conf
    }
  }
} 
Example 34
Source File: SocketInputDStream.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.dstream

import java.io._
import java.net.{ConnectException, Socket}
import java.nio.charset.StandardCharsets

import scala.reflect.ClassTag
import scala.util.control.NonFatal

import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    _ssc: StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](_ssc) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  private var socket: Socket = _

  def onStart() {

    logInfo(s"Connecting to $host:$port")
    try {
      socket = new Socket(host, port)
    } catch {
      case e: ConnectException =>
        restart(s"Error connecting to $host:$port", e)
        return
    }
    logInfo(s"Connected to $host:$port")

    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // in case restart thread close it twice
    synchronized {
      if (socket != null) {
        socket.close()
        socket = null
        logInfo(s"Closed socket to $host:$port")
      }
    }
  }

  
  // Receive loop referenced by the thread started in onStart. (Restored here;
  // the listing dropped it.) Reads elements until the receiver is stopped.
  def receive() {
    try {
      val iterator = bytesToObjects(socket.getInputStream())
      while (!isStopped && iterator.hasNext) {
        store(iterator.next())
      }
      if (!isStopped()) {
        restart("Socket data stream had no more data")
      } else {
        logInfo("Stopped receiving")
      }
    } catch {
      case NonFatal(e) =>
        logWarning("Error receiving data", e)
        restart("Error receiving data", e)
    } finally {
      onStop()
    }
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(
      new InputStreamReader(inputStream, StandardCharsets.UTF_8))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
} 
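
This receiver is the machinery behind the public socketTextStream API; a minimal, hedged driver sketch (host, port and app name are placeholders):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

val conf = new SparkConf().setMaster("local[2]").setAppName("socket-demo")
val ssc  = new StreamingContext(conf, Seconds(1))

// Builds a SocketInputDStream with bytesToLines as its InputStream => Iterator[String]
val lines = ssc.socketTextStream("localhost", 9999)
lines.print()

ssc.start()
ssc.awaitTermination()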
Example 35
Source File: EventLogDownloadResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 36
Source File: RpcTimeout.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.rpc

import java.util.concurrent.TimeoutException

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.control.NonFatal

import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.util.Utils


// Pairs a timeout duration with the configuration property that produced it,
// so timeout errors can name the property to tune. (Only the companion's
// lookup constructor survives in this listing; a minimal class header is
// restored so it compiles.)
private[spark] class RpcTimeout(val duration: FiniteDuration, val timeoutProp: String)
  extends Serializable

private[spark] object RpcTimeout {

  def apply(conf: SparkConf, timeoutPropList: Seq[String], defaultValue: String): RpcTimeout = {
    require(timeoutPropList.nonEmpty)

    // Find the first set property or use the default value with the first property
    val itr = timeoutPropList.iterator
    var foundProp: Option[(String, String)] = None
    while (itr.hasNext && foundProp.isEmpty) {
      val propKey = itr.next()
      conf.getOption(propKey).foreach { prop => foundProp = Some(propKey, prop) }
    }
    val finalProp = foundProp.getOrElse(timeoutPropList.head -> defaultValue)
    val timeout = { Utils.timeStringAsSeconds(finalProp._2).seconds }
    new RpcTimeout(timeout, finalProp._1)
  }
} 
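
Lookup semantics in a hedged sketch: the first property that is set wins; if none is set, the default value is attributed to the first property name so error messages stay meaningful (the object is private[spark], so a real caller would sit under org.apache.spark):

import org.apache.spark.SparkConf

val conf = new SparkConf().set("spark.rpc.askTimeout", "30s")

// Resolves to 30 seconds, tagged with "spark.rpc.askTimeout"
val askTimeout = RpcTimeout(conf, Seq("spark.rpc.askTimeout", "spark.network.timeout"), "120s")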
Example 37
Source File: TaskDescription.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.nio.ByteBuffer

import scala.collection.mutable
import scala.collection.mutable.HashSet
import scala.util.control.NonFatal

import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.SerializerInstance
import org.apache.spark.util.SerializableBuffer


private[spark] class TaskDescription(
    val taskId: Long,
    val attemptNumber: Int,
    val executorId: String,
    val name: String,
    val index: Int,    // Index within this task's TaskSet
    val isFutureTask: Boolean,
    @transient private val _task: Task[_],
    @transient private val _addedFiles: mutable.Map[String, Long],
    @transient private val _addedJars: mutable.Map[String, Long],
    @transient private val _ser: SerializerInstance)
  extends Serializable with Logging {

  // Because ByteBuffers are not serializable, wrap the task in a SerializableBuffer
  private var buffer: SerializableBuffer = _

  def prepareSerializedTask(): Unit = {
    if (_task != null) {
      val serializedTask: ByteBuffer = try {
        Task.serializeWithDependencies(_task, _addedFiles, _addedJars, _ser)
      } catch {
        // If the task cannot be serialized, then there is no point in re-attempting
        // the task as it will always fail. So just abort the task set.
        case NonFatal(e) =>
          val msg = s"Failed to serialize the task $taskId, not attempting to retry it."
          logError(msg, e)
          // FIXME(shivaram): We don't have a handle to the taskSet here to abort it.
          throw new TaskNotSerializableException(e)
      }
      if (serializedTask.limit > TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024) {
        logWarning(s"Stage ${_task.stageId} contains a task of very large size " +
          s"(${serializedTask.limit / 1024} KB). The maximum recommended task size is " +
          s"${TaskSetManager.TASK_SIZE_TO_WARN_KB} KB.")
      }
      buffer = new SerializableBuffer(serializedTask)
    } else {
      buffer = new SerializableBuffer(ByteBuffer.allocate(0))
    }
  }

  def serializedTask: ByteBuffer = buffer.value

  override def toString: String = "TaskDescription(TID=%d, index=%d)".format(taskId, index)
} 
Example 38
Source File: package.scala    From kanadi   with MIT License
package org.zalando

import java.net.URI
import cats.syntax.either._
import io.circe.Decoder.Result
import io.circe.syntax._
import io.circe.{Decoder, DecodingFailure, Encoder, HCursor}
import scala.util.control.NonFatal

package object kanadi {
  private[kanadi] implicit val uriEncoder: Encoder[URI] =
    Encoder.instance[URI](_.toString.asJson)

  private[kanadi] implicit val uriDecoder: Decoder[URI] = new Decoder[URI] {
    override def apply(c: HCursor): Result[URI] =
      c.as[String].flatMap { value =>
        try {
          Right(new URI(value))
        } catch {
          case NonFatal(_) => Left(DecodingFailure("Invalid Uri", c.history))
        }
      }
  }
} 
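
A sketch of the decoder's behaviour (assumes circe's parser module; since the codecs are package-private, a real caller would sit inside org.zalando.kanadi). A malformed URI surfaces as a Left(DecodingFailure) because NonFatal converts the thrown URISyntaxException into a decoding error:

import java.net.URI
import io.circe.parser.decode

decode[URI]("\"https://example.com\"") // Right(https://example.com)
decode[URI]("\"http://exa mple\"")     // Left(DecodingFailure("Invalid Uri", ...))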
Example 39
Source File: ResourceManagement.scala    From scalismo-faces   with Apache License 2.0
package scalismo.faces.utils

import java.io.Closeable

import scala.io.Source
import scala.util.control.NonFatal
import scala.util.{Failure, Try}


// Loan-pattern helpers that always run a cleanup action and turn non-fatal
// construction failures into None. (The enclosing object is restored here.)
object ResourceManagement {

  def usingOption[T <: Closeable, R](obj: => Option[T], after: T => Unit = { t: T => t.close() })(block: T => Option[R]): Option[R] = {
    val o: Option[T] = try {
      obj
    } catch {
      case NonFatal(e) => None
    }
    o.flatMap { res =>
      try {
        block(res)
      } finally {
        after(res)
      }
    }
  }

} 
Example 40
Source File: TracingDirective.scala    From opencensus-scala   with Apache License 2.0
package io.opencensus.scala.akka.http

import akka.http.scaladsl.model.{HttpHeader, HttpRequest}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive0, Directive1, ExceptionHandler}
import com.typesafe.scalalogging.LazyLogging
import io.opencensus.scala.Tracing
import io.opencensus.scala.akka.http.propagation.AkkaB3FormatPropagation
import io.opencensus.scala.akka.http.trace.EndSpanResponse
import io.opencensus.scala.akka.http.trace.HttpExtractors._
import io.opencensus.scala.http.{HttpAttributes, ServiceAttributes, ServiceData}
import io.opencensus.scala.http.propagation.Propagation
import io.opencensus.trace.{Span, Status}

import scala.util.control.NonFatal

trait TracingDirective extends LazyLogging {

  protected def tracing: Tracing
  protected def propagation: Propagation[HttpHeader, HttpRequest]

  
  def traceRequestForServiceNoSpan(serviceData: ServiceData): Directive0 =
    traceRequest(serviceData).map(_ => ())

  private def traceRequest(serviceData: ServiceData): Directive1[Span] =
    extractRequest.flatMap { req =>
      val span = buildSpan(req, serviceData)
      recordSuccess(span) & recordException(span) & provide(span)
    }

  private def buildSpan(req: HttpRequest, serviceData: ServiceData): Span = {
    val name = req.uri.path.toString()

    val span = propagation
      .extractContext(req)
      .fold(
        { error =>
          logger.debug("Extracting of parent context failed", error)
          tracing.startSpan(name)
        },
        tracing.startSpanWithRemoteParent(name, _)
      )

    ServiceAttributes.setAttributesForService(span, serviceData)
    HttpAttributes.setAttributesForRequest(span, req)
    span
  }

  private def recordSuccess(span: Span) =
    mapResponse(EndSpanResponse.forServer(tracing, _, span))

  private def recordException(span: Span) =
    handleExceptions(ExceptionHandler {
      case NonFatal(ex) =>
        tracing.endSpan(span, Status.INTERNAL)
        throw ex
    })
}

object TracingDirective extends TracingDirective {
  override protected def tracing: Tracing = Tracing
  override protected def propagation: Propagation[HttpHeader, HttpRequest] =
    AkkaB3FormatPropagation
} 
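
A hedged route sketch: the directive opens a span per request (continuing a remote parent when B3 headers are present) and closes it through mapResponse or, on failure, the exception handler. The ServiceData construction below is an assumption about the library's API:

import akka.http.scaladsl.server.Directives._
import io.opencensus.scala.http.ServiceData

val route =
  TracingDirective.traceRequestForServiceNoSpan(ServiceData()) {
    complete("ok") // the span is ended with the response status
  }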
Example 41
Source File: JsonBodyParser.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.rest.play.controllers

import akka.util.ByteString
import org.apache.s2graph.core.utils.logger
import play.api.Play
import play.api.libs.iteratee.Iteratee
import play.api.libs.json.{JsValue, Json}
import play.api.libs.streams.Streams
import play.api.mvc._

import scala.concurrent.Future
import scala.util.control.NonFatal

object s2parse extends BodyParsers {

  import parse._

  val defaultMaxTextLength = 1024 * 512
  val defaultMaxJsonLength = 1024 * 512

  def json: BodyParser[JsValue] = json(defaultMaxTextLength)

  
  def jsonText: BodyParser[String] = when(
    _.contentType.exists(m => m.equalsIgnoreCase("text/json") || m.equalsIgnoreCase("application/json")),
    jsonText(defaultMaxTextLength),
    createBadResult("Expecting text/json or application/json body")
  )

  private def jsonText(maxLength: Int): BodyParser[String] = BodyParser("json, maxLength=" + maxLength) { request =>
    import play.api.libs.iteratee.Execution.Implicits.trampoline
    import play.api.libs.iteratee.Traversable

    val iteratee = Traversable.takeUpTo[ByteString](maxLength)
      .transform(Iteratee.consume[ByteString]().map(_.utf8String))
      .flatMap(Iteratee.eofOrElse(Results.EntityTooLarge))

    Streams.iterateeToAccumulator(iteratee)
  }

  def json(maxLength: Int): BodyParser[JsValue] = when(
    _.contentType.exists(m => m.equalsIgnoreCase("text/json") || m.equalsIgnoreCase("application/json")),
    tolerantJson(maxLength),
    createBadResult("Expecting text/json or application/json body")
  )

  def tolerantJson(maxLength: Int): BodyParser[JsValue] =
    tolerantBodyParser[JsValue]("json", maxLength, "Invalid Json") { (request, bytes) =>
      // Encoding notes: RFC 4627 requires that JSON be encoded in Unicode, and states that whether that's
      // UTF-8, UTF-16 or UTF-32 can be auto detected by reading the first two bytes. So we ignore the declared
      // charset and don't decode, we passing the byte array as is because Jackson supports auto detection.
      Json.parse(bytes)
    }

  private def tolerantBodyParser[A](name: String, maxLength: Int, errorMessage: String)(parser: (RequestHeader, Array[Byte]) => A): BodyParser[A] =
    BodyParser(name + ", maxLength=" + maxLength) { request =>
      import play.api.libs.iteratee.Execution.Implicits.trampoline
      import play.api.libs.iteratee.Traversable

      import scala.util.control.Exception._

      val bodyParser: Iteratee[ByteString, Either[Result, Either[Future[Result], A]]] =
        Traversable.takeUpTo[ByteString](maxLength).transform(
          Iteratee.consume[ByteString]().map { bytes =>
            allCatch[A].either {
              parser(request, bytes.toByteBuffer.array())
            }.left.map {
              case NonFatal(e) =>
                val txt = bytes.utf8String
                logger.error(s"$errorMessage: $txt", e)
                createBadResult(s"$errorMessage: $e")(request)
              case t => throw t
            }
          }
        ).flatMap(Iteratee.eofOrElse(Results.EntityTooLarge))

      Streams.iterateeToAccumulator(bodyParser).mapFuture {
        case Left(tooLarge) => Future.successful(Left(tooLarge))
        case Right(Left(badResult)) => badResult.map(Left.apply)
        case Right(Right(body)) => Future.successful(Right(body))
      }
    }

  private def createBadResult(msg: String): RequestHeader => Future[Result] = { request =>
    Play.maybeApplication.map(_.global.onBadRequest(request, msg))
      .getOrElse(Future.successful(Results.BadRequest))
  }
} 
Example 42
Source File: MatchingRulesCache.scala    From matcher   with MIT License
package com.wavesplatform.dex.caches

import java.util.concurrent.ConcurrentHashMap

import cats.data.NonEmptyList
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.settings.{DenormalizedMatchingRule, MatcherSettings, MatchingRule}

import scala.util.Try
import scala.util.control.NonFatal

class MatchingRulesCache(matcherSettings: MatcherSettings) extends ScorexLogging {

  private val allMatchingRules    = new ConcurrentHashMap[AssetPair, NonEmptyList[DenormalizedMatchingRule]]
  private val currentMatchingRule = new ConcurrentHashMap[AssetPair, DenormalizedMatchingRule]

  def getMatchingRules(assetPair: AssetPair, assetDecimals: Asset => Int): NonEmptyList[DenormalizedMatchingRule] = {
    allMatchingRules.computeIfAbsent(
      assetPair,
      _ => DenormalizedMatchingRule.getDenormalizedMatchingRules(matcherSettings, assetDecimals, assetPair)
    )
  }

  // DEX-488 TODO: remove once the cause of the NPE has been found
  def getDenormalizedRuleForNextOrder(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): DenormalizedMatchingRule = {

    lazy val defaultRule = DenormalizedMatchingRule.getDefaultRule(assetPair, assetDecimals)

    val result =
      Try {
        getMatchingRules(assetPair, assetDecimals).foldLeft(defaultRule) { case (acc, mr) => if (mr.startOffset <= (currentOffset + 1)) mr else acc }
      }.recover { case NonFatal(e) => log.error("Can't get a denormalized rule for the next order", e); defaultRule }
        .getOrElse(defaultRule)

    result.copy(tickSize = result.tickSize max defaultRule.tickSize)
  }

  def getNormalizedRuleForNextOrder(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): MatchingRule = {
    getDenormalizedRuleForNextOrder(assetPair, currentOffset, assetDecimals).normalize(assetPair, assetDecimals)
  }

  def updateCurrentMatchingRule(assetPair: AssetPair, denormalizedMatchingRule: DenormalizedMatchingRule): Unit = {
    currentMatchingRule.put(assetPair, denormalizedMatchingRule)
  }

  def setCurrentMatchingRuleForNewOrderBook(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): Unit = {
    updateCurrentMatchingRule(assetPair, getDenormalizedRuleForNextOrder(assetPair, currentOffset, assetDecimals))
  }
} 
Example 43
Source File: it.scala    From matcher   with MIT License
package com.wavesplatform

import com.wavesplatform.dex.domain.account.{KeyPair, PublicKey}
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.domain.order.{Order, OrderType}
import com.wavesplatform.dex.waves.WavesFeeConstants._
import com.wavesplatform.it.api.MatcherCommand
import org.scalacheck.Gen

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, Future}
import scala.util.Random
import scala.util.control.NonFatal

package object it {

  
  def executeCommands(xs: Seq[MatcherCommand], ignoreErrors: Boolean = true, timeout: FiniteDuration = 3.minutes): Int = {
    Await.result(Future.sequence(xs.map(executeCommand(_, ignoreErrors))), timeout).sum
  }

  private def executeCommand(x: MatcherCommand, ignoreErrors: Boolean): Future[Int] =
    try x match {
      case MatcherCommand.Place(api, order) => api.tryPlace(order).map(_.fold(_ => 0, _ => 1))
      case MatcherCommand.Cancel(api, owner, order) =>
        api.tryCancel(owner, order).map(_.fold(_ => 0, _ => 1))
    } catch {
      case NonFatal(e) =>
        if (ignoreErrors) Future.successful(0)
        else Future.failed(e)
    }

  def orderGen(matcher: PublicKey,
               trader: KeyPair,
               assetPairs: Seq[AssetPair],
               types: Seq[OrderType] = Seq(OrderType.BUY, OrderType.SELL)): Gen[Order] = {
    val ts = System.currentTimeMillis()
    for {
      assetPair      <- Gen.oneOf(assetPairs)
      tpe            <- Gen.oneOf(types)
      amount         <- Gen.choose(10, 100)
      price          <- Gen.choose(10, 100)
      orderVersion   <- Gen.choose[Byte](1, 3)
      expirationDiff <- Gen.choose(600000, 6000000)
    } yield {
      if (tpe == OrderType.BUY)
        Order.buy(
          trader,
          matcher,
          assetPair,
          amount,
          price * Order.PriceConstant,
          ts,
          ts + expirationDiff,
          matcherFee,
          orderVersion
        )
      else
        Order.sell(
          trader,
          matcher,
          assetPair,
          amount,
          price * Order.PriceConstant,
          ts,
          ts + expirationDiff,
          matcherFee,
          orderVersion
        )
    }
  }

  def choose[T](xs: IndexedSeq[T]): T = xs(Random.nextInt(xs.size))
} 
Example 44
Source File: Base58.scala    From matcher   with MIT License
package com.wavesplatform.dex.domain.bytes.codec

import scala.util.control.NonFatal

object Base58 extends BaseXXEncDec {

  private[this] val useSlowBase58: Boolean = sys.props.get("waves.use-slow-base58").exists(s => s.toLowerCase == "true" || s == "1")

  override val defaultDecodeLimit: Int = 192

  override def encode(array: Array[Byte]): String = {
    if (useSlowBase58) StdBase58.encode(array)
    else {
      try {
        FastBase58.encode(array)
      } catch {
        case NonFatal(_) => StdBase58.encode(array)
      }
    }
  }

  override def decode(str: String): Array[Byte] = {
    if (useSlowBase58) StdBase58.decode(str)
    else {
      try {
        FastBase58.decode(str)
      } catch {
        case NonFatal(_) => StdBase58.decode(str)
      }
    }
  }
} 
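
A round-trip sketch: the fast codec is the default path, and StdBase58 is only used when the system property opts in or FastBase58 throws (caught via NonFatal):

val payload = "NonFatal".getBytes("UTF-8")
val encoded = Base58.encode(payload)
val decoded = Base58.decode(encoded)
assert(decoded.sameElements(payload))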
Example 45
Source File: GlobalTimer.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.time

import io.netty.util.{HashedWheelTimer, Timer}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal

object GlobalTimer {

  val instance: Timer = new HashedWheelTimer()

  sys.addShutdownHook {
    instance.stop()
  }

  implicit class TimerOpsImplicits(val timer: Timer) extends AnyVal {
    def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = {
      val p = Promise[A]
      try {
        timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit)
      } catch {
        case NonFatal(e) => p.failure(e)
      }
      p.future
    }

    def sleep(term: FiniteDuration): Future[Unit] = schedule(Future.successful(()), term)
  }
} 
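
Usage sketch: because schedule wraps Timer.newTimeout in a Promise, a timer that rejects the task (for example after stop()) fails the returned Future instead of throwing at the call site:

import scala.concurrent.Future
import scala.concurrent.duration._
import com.wavesplatform.dex.it.time.GlobalTimer
import com.wavesplatform.dex.it.time.GlobalTimer.TimerOpsImplicits

// Completes about one second from now; never throws synchronously
val tick: Future[Unit] = GlobalTimer.instance.sleep(1.second)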
Example 46
Source File: FOps.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.fp

import java.nio.charset.StandardCharsets

import cats.syntax.apply._
import cats.syntax.either._
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.softwaremill.sttp.{DeserializationError, Response}
import play.api.libs.json._

import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.control.NonFatal

case class RepeatRequestOptions(delayBetweenRequests: FiniteDuration, maxAttempts: Int) {
  def decreaseAttempts: RepeatRequestOptions = copy(maxAttempts = maxAttempts - 1)
}

class FOps[F[_]](implicit M: ThrowableMonadError[F], W: CanWait[F]) {

  def repeatUntil[T](f: => F[T], options: RepeatRequestOptions = RepeatRequestOptions(1.second, 30))(stopCond: T => Boolean): F[T] =
    f.flatMap { firstResp =>
        (firstResp, options).tailRecM[F, (T, RepeatRequestOptions)] {
          case (resp, currOptions) =>
            if (stopCond(resp)) M.pure((resp, currOptions).asRight)
            else if (currOptions.maxAttempts <= 0) M.raiseError(new RuntimeException(s"All attempts are out! The last response is: $resp"))
            else W.wait(options.delayBetweenRequests).productR(f).map(x => (x, currOptions.decreaseAttempts).asLeft)
        }
      }
      .map(_._1)

  def repeatUntil[T](f: => F[T], delay: FiniteDuration)(pred: T => Boolean): F[T] =
    f.flatMap {
      _.tailRecM[F, T] { x =>
        if (pred(x)) M.pure(x.asRight)
        else W.wait(delay).productR(f).map(_.asLeft)
      }
    }

  def repeatUntilResponse[T](f: => F[Response[Either[DeserializationError[JsError], T]]], delay: FiniteDuration)(
      pred: Response[Either[DeserializationError[JsError], T]] => Boolean): F[T] =
    repeatUntil(f, delay)(pred).flatMap(parseResponse)

  def parseResponse[T](resp: Response[Either[DeserializationError[JsError], T]]): F[T] =
    resp.rawErrorBody match {
      case Left(e) =>
        M.raiseError[T](
          new RuntimeException(s"The server returned an error. HTTP code is ${resp.code}, body: ${new String(e, StandardCharsets.UTF_8)}"))
      case Right(Left(error)) => M.raiseError[T](new RuntimeException(s"Can't parse the response: $error"))
      case Right(Right(r))    => M.pure(r)
    }

  def parseTryResponse[E: Reads, T](resp: Response[T]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(r) => M.pure(Right(r))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }

  def parseTryResponseEither[E: Reads, T](resp: Response[Either[DeserializationError[JsError], T]]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(Right(r)) => M.pure(Right(r))
    case Right(Left(e))  => M.raiseError[Either[E, T]](new RuntimeException(s"The server returned success, but can't parse response: $e"))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }
}

object FOps {
  def apply[F[_]: CanWait: ThrowableMonadError]: FOps[F] = new FOps[F]
} 
Example 47
Source File: Main.scala    From jardiff   with Apache License 2.0
package scala.tools.jardiff

import java.io.{ByteArrayOutputStream, File, PrintWriter}
import java.nio.file._

import org.apache.commons.cli
import org.apache.commons.cli.{CommandLine, DefaultParser, HelpFormatter, Options}
import org.eclipse.jgit.util.io.NullOutputStream

import scala.collection.JavaConverters.collectionAsScalaIterableConverter
import scala.util.Try
import scala.util.control.NonFatal

object Main {
  def main(args: Array[String]): Unit = {
    run(args) match {
      case ShowUsage(msg) => System.err.println(msg); sys.exit(-1)
      case Error(err) => err.printStackTrace(System.err); sys.exit(-1)
      case Success(diffFound) => sys.exit(if (diffFound) 1 else 0)
    }
  }

  private object Opts {
    val Help = new cli.Option("h", "help", false, "Display this message")
    val Git = new cli.Option("g", "git", true, "Directory to output a git repository containing the diff")
    Git.setArgName("dir")
    val NoCode = new cli.Option("c", "suppress-code", false, "Suppress method bodies")
    val Raw = new cli.Option("r", "raw", false, "Disable sorting and filtering of classfile contents")
    val NoPrivates = new cli.Option("p", "suppress-privates", false, "Display only non-private members")
    val ContextLines = new cli.Option("U", "unified", true, "Number of context lines in diff")
    val Quiet = new cli.Option("q", "quiet", false, "Don't output diffs to standard out")
    val Ignore = new cli.Option("i", "ignore", true, "File pattern to ignore rendered files in gitignore format")
    Ignore.setArgs(cli.Option.UNLIMITED_VALUES)
    ContextLines.setArgName("n")
    def apply(): Options = {
      new cli.Options().addOption(Help).addOption(Git).addOption(ContextLines).addOption(NoCode).addOption(Raw).addOption(NoPrivates).addOption(Quiet).addOption(Ignore)
    }
  }
  private implicit class RichCommandLine(val self: CommandLine) {
    def has(o: cli.Option): Boolean = self.hasOption(o.getOpt)
    def get(o: cli.Option): String = self.getOptionValue(o.getOpt)
    def getOptInt(o: cli.Option): Option[Int] = Option(self.getOptionValue(o.getOpt)).map(x => Try(x.toInt).getOrElse(throw new cli.ParseException("--" + o.getLongOpt + " requires an integer")))
  }

  private def helpText: String = {
    val formatter = new HelpFormatter
    val baos = new ByteArrayOutputStream()
    val writer = new PrintWriter(baos)
    try {
      val footer = s" VERSION1 [VERSION2 ...]\n\nEach VERSION may designate a single file, a directory, JAR file or a `${File.pathSeparator}`-delimited classpath\n\n"
      formatter.printHelp(writer, 80, "jardiff", footer, Opts(), HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD, "", true)
      writer.flush()
      baos.toString().replaceFirst("\\n", "")

    } finally {
      writer.close()
    }
  }

  def run(args: Array[String]): RunResult = {
    val parser = new DefaultParser

    try {
      val line = parser.parse(Opts(), args)
      val trailingArgs = line.getArgList
      if (line.has(Opts.Help)) {
        ShowUsage(helpText)
      } else {
        val gitRepo = if (line.has(Opts.Git)) Some(Paths.get(line.get(Opts.Git))) else None
        val diffOutputStream = if (line.has(Opts.Quiet)) NullOutputStream.INSTANCE else System.out
        val config = JarDiff.Config(gitRepo, !line.has(Opts.NoCode), line.has(Opts.Raw),
          !line.has(Opts.NoPrivates), line.getOptInt(Opts.ContextLines), diffOutputStream,
          Option(line.getOptionValues(Opts.Ignore.getOpt)).toList.flatten
        )
        val paths = trailingArgs.asScala.toList.map(JarDiff.expandClassPath)
        paths match {
          case Nil => ShowUsage(helpText)
          case _ =>
            val jarDiff = JarDiff(paths, config)
            val diffFound = jarDiff.diff()
            Success(diffFound)
        }
      }
    } catch {
      case exp: cli.ParseException => ShowUsage(helpText)
      case NonFatal(t) => Error(t)
    }
  }
}

sealed abstract class RunResult
case class ShowUsage(msg: String) extends RunResult
case class Error(err: Throwable) extends RunResult
case class Success(diffFound: Boolean) extends RunResult 
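
The entry point can also be driven programmatically; a hedged sketch (the jar paths and output directory are placeholders):

import scala.tools.jardiff._

Main.run(Array("-g", "/tmp/jardiff-repo", "old.jar", "new.jar")) match {
  case Success(diffFound) => println(s"diff found: $diffFound")
  case ShowUsage(msg)     => Console.err.println(msg)
  case Error(err)         => err.printStackTrace()
}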
Example 48
Source File: EmbeddedCassandra.scala    From phantom-activator-template   with Apache License 2.0
package controllers

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.slf4j.Logger

import scala.concurrent.blocking
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}


// Manages a singleton embedded Cassandra server for tests. (The object header
// and the `started` guard referenced below are restored here.)
object EmbeddedCassandra {

  private val started: AtomicBoolean = new AtomicBoolean(false)

  def start(logger: Logger, config: Option[File] = None, timeout: Option[Int] = None): Unit = {
    this.synchronized {
      if (started.compareAndSet(false, true)) {
        blocking {
          val configFile = config.map(_.toURI.toString) getOrElse EmbeddedCassandraServerHelper.DEFAULT_CASSANDRA_YML_FILE
          System.setProperty("cassandra.config", configFile)
          Try {
            EmbeddedCassandraServerHelper.mkdirs()
          } match {
            case Success(value) => logger.info("Successfully created directories for embedded Cassandra.")
            case Failure(NonFatal(e)) =>
              logger.error(s"Error creating Embedded cassandra directories: ${e.getMessage}")
          }

          (config, timeout) match {
            case (Some(file), None) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                EmbeddedCassandraServerHelper.DEFAULT_STARTUP_TIMEOUT
              )
            case (Some(file), Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                time
              )

            case (None, Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with default configuration and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(time)
            case (None, None) =>
              logger.info("Starting Cassandra in embedded mode with default configuration.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra()
              logger.info("Successfully started embedded Cassandra")
          }
        }
      }
      else {
        logger.info("Embedded Cassandra has already been started")
      }
    }
  }


  def cleanup(logger: Logger): Unit = {
    this.synchronized {
      if (started.compareAndSet(true, false)) {
        logger.info("Cleaning up embedded Cassandra")
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
      } else {
        logger.info("Cassandra is not running, not cleaning up")
      }
    }
  }
} 
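
A hedged lifecycle sketch for a test suite; start is idempotent because of the AtomicBoolean guard, so competing suites cannot boot Cassandra twice:

import org.slf4j.LoggerFactory

val log = LoggerFactory.getLogger("embedded-cassandra")

EmbeddedCassandra.start(log)   // no-op if another suite already started it
try {
  // ... exercise code that talks to Cassandra ...
} finally {
  EmbeddedCassandra.cleanup(log)
}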
Example 49
Source File: TopicConfigurator.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import cats.Eq
import cats.data.Reader
import cats.instances.int._
import cats.instances.vector._
import cats.instances.try_._
import cats.syntax.eq._
import com.sky.kafka.configurator.error.{ReplicationChangeFound, TopicNotFound}
import com.typesafe.scalalogging.LazyLogging

import scala.util.control.NonFatal
import scala.util.{Failure, Success}

case class TopicConfigurator(topicReader: TopicReader, topicWriter: TopicWriter) extends LazyLogging {

  def configure(topic: Topic): Logger[Unit] =
    topicReader.fetch(topic.name) match {
      case Success(currentTopic) =>
        updateTopic(currentTopic, topic)
      case Failure(TopicNotFound(_)) =>
        topicWriter.create(topic)
          .withLog(s"Topic ${topic.name} was not found, so it has been created")
      case Failure(NonFatal(t)) =>
        Failure(t).asWriter
    }

  private def updateTopic(oldTopic: Topic, newTopic: Topic): Logger[Unit] = {

    def ifDifferent[T: Eq](oldValue: T, newValue: T)(updateOperation: (Topic, Topic) => Logger[Unit])(messageIfSame: String): Logger[Unit] =
      if (oldValue =!= newValue)
        updateOperation(oldTopic, newTopic)
      else
        Success(()).withLog(messageIfSame)

    import TopicConfigurator._

    for {
      _ <- ifDifferent(oldTopic.replicationFactor, newTopic.replicationFactor)(failReplicationChange)(s"Replication factor unchanged for ${newTopic.name}.")
      _ <- ifDifferent(oldTopic.partitions, newTopic.partitions)(updatePartitions)(s"No change in number of partitions for ${newTopic.name}")
      _ <- ifDifferent(oldTopic.config, newTopic.config)(updateConfig)(s"No change in config for ${newTopic.name}")
    } yield ()
  }

  private def failReplicationChange(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    Failure(ReplicationChangeFound).asWriter

  private def updatePartitions(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    topicWriter
      .updatePartitions(newTopic.name, newTopic.partitions)
      .withLog(s"Updated topic ${newTopic.name} from ${oldTopic.partitions} to ${newTopic.partitions} partition(s)")

  private def updateConfig(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    topicWriter
      .updateConfig(newTopic.name, newTopic.config)
      .withLog(s"Updated configuration of topic ${newTopic.name}")
}

object TopicConfigurator {
  def reader: Reader[AppConfig, TopicConfigurator] = KafkaTopicAdmin.reader
    .map(kafkaAdminClient => TopicConfigurator(kafkaAdminClient, kafkaAdminClient))

  private implicit val topicConfigIsContained: Eq[Map[String, String]] = Eq.instance { case (left, right) =>
    left.toList.forall(right.toList.contains(_)) || right.toList.forall(left.toList.contains(_))
  }
} 
Example 50
Source File: ParquetRecordDecoder.scala    From parquet4s   with MIT License
package com.github.mjakubowski84.parquet4s

import shapeless.labelled.{FieldType, field}
import shapeless.{::, HList, HNil, LabelledGeneric, Lazy, Witness}

import scala.language.higherKinds
import scala.util.control.NonFatal


// Type class decoding a whole RowParquetRecord into a value of type T.
// (The trait header is restored here.)
trait ParquetRecordDecoder[T] {

  def decode(record: RowParquetRecord, configuration: ValueCodecConfiguration): T

}

object ParquetRecordDecoder {

  object DecodingException {
    def apply(msg: String, cause: Throwable): DecodingException = {
      val decodingException = DecodingException(msg)
      decodingException.initCause(cause)
      decodingException
    }
  }

  case class DecodingException(msg: String) extends RuntimeException(msg)

  def apply[T](implicit ev: ParquetRecordDecoder[T]): ParquetRecordDecoder[T] = ev

  def decode[T](record: RowParquetRecord, configuration: ValueCodecConfiguration = ValueCodecConfiguration.default)
               (implicit ev: ParquetRecordDecoder[T]): T = ev.decode(record, configuration)

  implicit val nilDecoder: ParquetRecordDecoder[HNil] = new ParquetRecordDecoder[HNil] {
    override def decode(record: RowParquetRecord, configuration: ValueCodecConfiguration): HNil.type = HNil
  }

  implicit def headValueDecoder[FieldName <: Symbol, Head, Tail <: HList](implicit
                                                                          witness: Witness.Aux[FieldName],
                                                                          headDecoder: ValueCodec[Head],
                                                                          tailDecoder: ParquetRecordDecoder[Tail]
                                                                         ): ParquetRecordDecoder[FieldType[FieldName, Head] :: Tail] =
    new ParquetRecordDecoder[FieldType[FieldName, Head] :: Tail] {
      override def decode(record: RowParquetRecord, configuration: ValueCodecConfiguration): FieldType[FieldName, Head] :: Tail = {
        val fieldName = witness.value.name
        val decodedFieldValue = try {
          record.get[Head](fieldName, configuration)
        } catch {
          case NonFatal(cause) =>
            throw DecodingException(s"Failed to decode field $fieldName of record: $record", cause)
        }
        field[FieldName](decodedFieldValue) :: tailDecoder.decode(record, configuration)
      }
    }

  implicit def genericDecoder[A, R](implicit
                                    gen: LabelledGeneric.Aux[A, R],
                                    decoder: Lazy[ParquetRecordDecoder[R]]
                                   ): ParquetRecordDecoder[A] =
    new ParquetRecordDecoder[A] {
      override def decode(record: RowParquetRecord, configuration: ValueCodecConfiguration): A =
        gen.from(decoder.value.decode(record, configuration))
    }

} 
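
Decoding is derived from the case class shape via LabelledGeneric, one ValueCodec per field; a hedged sketch (the record would normally come from the library's reader):

import com.github.mjakubowski84.parquet4s._

case class User(id: Long, name: String)

def readUser(record: RowParquetRecord): User =
  ParquetRecordDecoder.decode[User](record) // throws DecodingException if a field fails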
Example 51
Source File: ParquetRecordEncoder.scala    From parquet4s   with MIT License
package com.github.mjakubowski84.parquet4s

import shapeless.labelled.FieldType
import shapeless.{::, HList, HNil, LabelledGeneric, Lazy, Witness}

import scala.language.higherKinds
import scala.util.control.NonFatal



// Type class encoding a value of type T into a RowParquetRecord.
// (The trait header is restored here.)
trait ParquetRecordEncoder[T] {

  def encode(entity: T, configuration: ValueCodecConfiguration): RowParquetRecord

}

object ParquetRecordEncoder {

  object EncodingException {
    def apply(msg: String, cause: Throwable): EncodingException = {
      val encodingException = EncodingException(msg)
      encodingException.initCause(cause)
      encodingException
    }
  }

  case class EncodingException(msg: String) extends RuntimeException(msg)

  def apply[T](implicit ev: ParquetRecordEncoder[T]): ParquetRecordEncoder[T] = ev

  def encode[T](entity: T, configuration: ValueCodecConfiguration = ValueCodecConfiguration.default)
               (implicit ev: ParquetRecordEncoder[T]): RowParquetRecord = ev.encode(entity, configuration)

  implicit val nilDEncoder: ParquetRecordEncoder[HNil] = new ParquetRecordEncoder[HNil] {
    override def encode(nil: HNil, configuration: ValueCodecConfiguration): RowParquetRecord = RowParquetRecord()
  }

  implicit def headValueEncoder[FieldName <: Symbol, Head, Tail <: HList](implicit
                                                                          witness: Witness.Aux[FieldName],
                                                                          headEncoder: ValueCodec[Head],
                                                                          tailEncoder: ParquetRecordEncoder[Tail]
                                                                         ): ParquetRecordEncoder[FieldType[FieldName, Head] :: Tail] =
    new ParquetRecordEncoder[FieldType[FieldName, Head] :: Tail] {
      override def encode(entity: FieldType[FieldName, Head] :: Tail, configuration: ValueCodecConfiguration): RowParquetRecord = {
        val fieldName = witness.value.name
        val fieldValue = try {
          headEncoder.encode(entity.head, configuration)
        } catch {
          case NonFatal(cause) =>
            throw EncodingException(s"Failed to encode field $fieldName: ${entity.head}, due to ${cause.getMessage}", cause)
        }
        tailEncoder.encode(entity.tail, configuration).prepend(fieldName, fieldValue)
      }
    }

  implicit def genericEncoder[A, R](implicit
                                    gen: LabelledGeneric.Aux[A, R],
                                    encoder: Lazy[ParquetRecordEncoder[R]]
                                   ): ParquetRecordEncoder[A] =
    new ParquetRecordEncoder[A] {
      override def encode(entity: A, configuration: ValueCodecConfiguration): RowParquetRecord =
        encoder.value.encode(gen.to(entity), configuration)
    }

} 
Example 52
Source File: Spec.scala    From monadless   with Apache License 2.0
package io.monadless

import scala.util.Try
import scala.util.control.NonFatal

import org.scalatest.MustMatchers
import io.monadless.impl.TestSupport

trait Spec
  extends org.scalatest.FreeSpec
  with MustMatchers
  with Monadless[Try]
  with TestSupport[Try] {

  def apply[T](v: => T) = Try(v)

  def collect[T](list: List[Try[T]]): Try[List[T]] =
    list.foldLeft(Try(List.empty[T])) {
      (acc, item) =>
        for {
          l <- acc
          i <- item
        } yield l :+ i
    }

  def get[T](m: Try[T]): T = m.get

  def rescue[T](m: Try[T])(pf: PartialFunction[Throwable, Try[T]]) = m.recoverWith(pf)

  def ensure[T](m: Try[T])(f: => Unit) =
    m.map { r =>
      try f
      catch {
        case NonFatal(e) => ()
      }
      r
    }
} 
Example 53
Source File: MonadlessTry.scala    From monadless   with Apache License 2.0
package io.monadless.stdlib

import io.monadless.Monadless
import scala.util.Try
import scala.util.control.NonFatal

trait MonadlessTry extends Monadless[Try] {

  def collect[T](list: List[Try[T]]): Try[List[T]] =
    list.foldLeft(Try(List.empty[T])) {
      (acc, item) =>
        for {
          l <- acc
          i <- item
        } yield l :+ i
    }

  def rescue[T](m: Try[T])(pf: PartialFunction[Throwable, Try[T]]) =
    m.recoverWith(pf)

  def ensure[T](m: Try[T])(f: => Unit) = {
    try f
    catch {
      case NonFatal(e) => ()
    }
    m
  }
}

object MonadlessTry extends MonadlessTry 
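
With this instance in scope, Monadless's lift/unlift macros let Try computations be written in direct style; a small hedged sketch:

import scala.util.Try

object TryDirectStyle extends MonadlessTry {
  def parse(s: String): Try[Int] = Try(s.toInt)

  // Expands to parse("1").flatMap(a => parse("2").map(b => a + b))
  val sum: Try[Int] = lift { unlift(parse("1")) + unlift(parse("2")) }
}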
Example 54
Source File: Cp.scala    From benchmarks   with Apache License 2.0
package com.rossabaker
package benchmarks

import org.openjdk.jmh.annotations._

@State(Scope.Thread)
@Fork(2)
@Measurement(iterations = 10)
@Warmup(iterations = 10)
@Threads(1)
class Cp extends BenchmarkUtils {
  @Benchmark
  def fs2Sync(): Unit = {
    import _root_.fs2._, Stream._
    import java.nio.file.Paths
    io.file.readAll[Task](Paths.get("testdata/lorem-ipsum.txt"), 4096)
      .to(io.file.writeAll[Task](Paths.get("out/lorem-ipsum.txt")))
      .run
      .unsafeRun
  }

  @Benchmark
  def fs2Async(): Unit = {
    import _root_.fs2._, Stream._
    import java.nio.file.Paths
    io.file.readAllAsync[Task](Paths.get("testdata/lorem-ipsum.txt"), 4096)
      .to(io.file.writeAllAsync[Task](Paths.get("out/lorem-ipsum.txt")))
      .run
      .unsafeRun
  }

  @Benchmark
  def scalazStreamIo(): Unit = {
    import _root_.scalaz.stream._, Process._
    constant(4096)
      .through(io.fileChunkR("testdata/lorem-ipsum.txt"))
      .to(io.fileChunkW("out/lorem-ipsum.txt"))
      .run
      .unsafePerformSync
  }

  @Benchmark
  def scalazStreamNio(): Unit = {
    import _root_.scalaz.stream._, Process._
    constant(4096)
      .through(nio.file.chunkR("testdata/lorem-ipsum.txt"))
      .to(nio.file.chunkW("out/lorem-ipsum.txt"))
      .run
      .unsafePerformSync
  }

  // NOTE: the file's final benchmark (a Monix-based copy) is truncated in this
  // listing. Its surviving tail is preserved below as a comment, since it is
  // the part that uses NonFatal: the Observer's onComplete closes the output
  // stream and completes the callback, routing a non-fatal close failure to
  // onError instead of letting it escape.
  //
  //           def onComplete(): Unit = {
  //             try {
  //               out.close()
  //               callback.onSuccess(())
  //             } catch {
  //               case NonFatal(ex) =>
  //                 callback.onError(ex)
  //             }
  //           }
  //         }
  //       }
  //
  //     Await.result(
  //       copyFile(new File("testdata/lorem-ipsum.txt"), new File("out/lorem-ipsum.txt"), 4096)
  //         .runAsync(monixScheduler),
  //       Duration.Inf
  //     )
  //   }
} 
Example 55
Source File: Rules.scala    From seals   with Apache License 2.0
package dev.tauri.seals
package laws

import scala.util.control.NonFatal
import cats.kernel.Eq
import cats.kernel.laws._
import cats.kernel.laws.discipline._
import org.scalacheck.{ Arbitrary, Prop }
import org.scalacheck.Prop._

object Rules extends Serialization {

  def serializable[A: Arbitrary]: (String, Prop) = {
    "serializable" -> forAll { (a: A) =>
      withCatchNonFatal {
        val _: A = roundtripSer(a)
        Prop(Result(status = True))
      }
    }
  }

  def equalitySerializable[A : Arbitrary : Eq]: (String, Prop) = {
    "serialize-roundtrip-Eq" -> forAll { (a: A) =>
      withCatchNonFatal {
        val r: A = roundtripSer(a)
        r <-> a
      }
    }
  }

  def identitySerializable[A <: AnyRef : Arbitrary](a: A): (String, Prop) = {
    "serializable-roundtrip-identity" -> forAll { (a: A) =>
      withCatchNonFatal {
        val r: A = roundtripSer(a)
        Prop(Result(status = if (r eq a) True else False))
      }
    }
  }

  private def withCatchNonFatal(block: => Prop): Prop = {
    try {
      block
    } catch {
      case NonFatal(ex) =>
        Prop(Result(status = Exception(ex)))
      case ex: Throwable =>
        throw ex // workaround for -Xstrict-patmat-analysis problem
    }
  }
} 
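
These properties slot into an ordinary ScalaCheck suite; a hedged sketch:

import dev.tauri.seals.laws.Rules
import org.scalacheck.Properties

object SerializationProps extends Properties("serialization") {
  val (name, prop) = Rules.serializable[Int]
  property(name) = prop
}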
Example 56
Source File: PhaseCache.scala    From Converter   with GNU General Public License v3.0
package org.scalablytyped.converter.internal.phases

import java.nio.channels.{ClosedByInterruptException, FileLockInterruptionException}
import java.util

import org.scalablytyped.converter.internal.phases.PhaseCache.Ref

import scala.concurrent.{ExecutionException, Future, Promise}
import scala.util.control.NonFatal

class PhaseCache[Id, U](initialCapacity: Int = 1000) {
  private val m: util.Map[Ref[(Id, IsCircular)], Ref[Future[PhaseRes[Id, U]]]] =
    new util.HashMap(initialCapacity)

  def getOrElse(key: (Id, IsCircular))(compute: Promise[PhaseRes[Id, U]] => Unit): Future[PhaseRes[Id, U]] = {
    val keyRef = new Ref(key)
    var op: Option[Promise[PhaseRes[Id, U]]] = None

    val ret = synchronized {
      val existingFuture: Option[Future[PhaseRes[Id, U]]] =
        m.get(keyRef) match {
          case null => None
          case uRef =>
            uRef.get match {
              case null => None
              case u    => Some(u)
            }
        }

      existingFuture match {
        case None =>
          val p      = Promise[PhaseRes[Id, U]]()
          val future = p.future
          m.put(keyRef, new Ref(future))
          op = Some(p)
          future
        case Some(found) => found
      }
    }

    op.foreach { p =>
      try compute(p)
      catch {
        case x: FileLockInterruptionException            => throw x
        case x: InterruptedException                     => throw x
        case x: ClosedByInterruptException               => throw x
        case x: ExecutionException if x.getCause != null => p.failure(x.getCause)
        case NonFatal(th) => p.failure(th)
      }
    }

    ret
  }
}

object PhaseCache {
  private final class Ref[T](t: T) extends java.lang.ref.SoftReference[T](t) {
    override def equals(obj: Any): Boolean =
      obj match {
        case that: Ref[_] => that.get == get
        case _ => false
      }

    override def hashCode: Int = get.##
  }
} 
Example 57
Source File: PhaseRes.scala    From Converter   with GNU General Public License v3.0
package org.scalablytyped.converter.internal.phases

import java.nio.channels.{ClosedByInterruptException, FileLockInterruptionException}

import com.olvind.logging.Logger
import com.olvind.logging.Logger.LoggedException

import scala.collection.immutable.{SortedMap, TreeMap}
import scala.concurrent.ExecutionException
import scala.util.control.NonFatal

sealed trait PhaseRes[Id, T] extends Product with Serializable {
  import PhaseRes._

  def foreach(f: T => Unit): PhaseRes[Id, Unit] = map(f)

  def map[U](f: T => U): PhaseRes[Id, U] =
    this match {
      case Ok(value)       => Ok(f(value))
      case Ignore()        => Ignore()
      case Failure(errors) => Failure(errors)
    }

  def flatMap[U](f: T => PhaseRes[Id, U]): PhaseRes[Id, U] =
    this match {
      case Ok(value)       => f(value)
      case Ignore()        => Ignore()
      case Failure(errors) => Failure(errors)
    }
}

object PhaseRes {
  final case class Ok[Id, T](value: T) extends PhaseRes[Id, T]
  final case class Ignore[Id, T]() extends PhaseRes[Id, T]
  final case class Failure[Id, T](errors: Map[Id, Either[Throwable, String]]) extends PhaseRes[Id, T]

  def fromEither[Id, L, R](id: Id, e: Either[String, R]): PhaseRes[Id, R] =
    e match {
      case Right(value) => Ok(value)
      case Left(error)  => Failure(Map(id -> Right(error)))
    }

  def fromOption[Id, T](id: Id, e: Option[T], onEmpty: => Either[Throwable, String]): PhaseRes[Id, T] =
    e match {
      case Some(value) => Ok(value)
      case None        => Failure(Map(id -> onEmpty))
    }

  def sequenceSet[Id, T](rs: Set[PhaseRes[Id, T]]): PhaseRes[Id, Set[T]] =
    rs.foldLeft[PhaseRes[Id, Set[T]]](Ok(Set.empty)) {
      case (other, Ignore())                 => other
      case (Ok(ts), Ok(t))                   => Ok(ts + t)
      case (Ok(_), Failure(errors))          => Failure(errors)
      case (Failure(errors), Failure(error)) => Failure(errors ++ error)
      case (error @ Failure(_), Ok(_))       => error
      case (Ignore(), Ok(t))                 => Ok(Set(t))
      case (Ignore(), Failure(error))        => Failure(error)
    }

  def sequenceMap[Id: Ordering, T](rs: SortedMap[Id, PhaseRes[Id, T]]): PhaseRes[Id, SortedMap[Id, T]] =
    rs.foldLeft[PhaseRes[Id, SortedMap[Id, T]]](Ok(TreeMap.empty[Id, T])) {
      case (other, (_, Ignore()))                    => other
      case (Ok(map), (id, Ok(value)))                => Ok(map + ((id, value)))
      case (Ok(_), (_, Failure(errors)))             => Failure(errors)
      case (Failure(errors1), (_, Failure(errors2))) => Failure(errors1 ++ errors2)
      case (error @ Failure(_), _)                   => error
      case (Ignore(), (id, Ok(value)))               => Ok(TreeMap((id, value)))
      case (Ignore(), (_, Failure(errors)))          => Failure(errors)
    }

  def attempt[Id, T](id: Id, logger: Logger[Unit], t: => PhaseRes[Id, T]): PhaseRes[Id, T] =
    try t
    catch {
      case x: InterruptedException => throw x
      case x: ClosedByInterruptException => throw x
      case x: FileLockInterruptionException => throw x
      case x: ExecutionException if x.getCause != null =>
        val th = x.getCause
        logger.error(s"Caught exception: ${th.getMessage}", th)
        Failure[Id, T](Map(id -> Left(th)))
      case th: LoggedException =>
        Failure[Id, T](Map(id -> Left(th)))
      case NonFatal(th) =>
        logger.error(s"Caught exception: ${th.getMessage}", th)
        Failure[Id, T](Map(id -> Left(th)))
      case th: StackOverflowError =>
        logger.error("StackOverflowError", th)
        Failure[Id, T](Map(id -> Left(th)))
    }
} 
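
A note on the catch ordering in attempt above: ClosedByInterruptException and FileLockInterruptionException are ordinary IOExceptions, so NonFatal would swallow them if they were not rethrown first, while the StackOverflowError case is still reachable after the NonFatal case because NonFatal never matches a VirtualMachineError. Below is a minimal, standard-library-only sketch of how the extractor classifies throwables:

import scala.util.control.NonFatal

// NonFatal matches ordinary exceptions but deliberately not VirtualMachineError
// (e.g. StackOverflowError), ThreadDeath, InterruptedException, LinkageError or
// ControlThrowable; those must be handled or rethrown explicitly.
object NonFatalDemo extends App {
  def classify(t: Throwable): String = t match {
    case NonFatal(_) => "non-fatal"
    case _           => "fatal"
  }

  println(classify(new RuntimeException("boom"))) // non-fatal
  println(classify(new java.io.IOException))      // non-fatal
  println(classify(new InterruptedException))     // fatal
  println(classify(new StackOverflowError))       // fatal
}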
Example 58
Source File: TypeExpansionHarness.scala    From Converter   with GNU General Public License v3.0 5 votes vote down vote up
package org.scalablytyped.converter.internal
package ts

import java.io.StringWriter

import com.olvind.logging
import com.olvind.logging.Logger
import org.scalablytyped.converter.Selection
import org.scalablytyped.converter.internal.importer.Phase1ReadTypescript
import org.scalablytyped.converter.internal.ts.parser.TsParser
import org.scalablytyped.converter.internal.ts.transforms.SetCodePath

import scala.reflect.ClassTag
import scala.util.control.NonFatal

trait TypeExpansionHarness {
  val parser  = new TsParser(None)
  val libName = TsIdentLibrarySimple("testing")

  def Transformations(logger: Logger[Unit]): List[TsParsedFile => TsParsedFile] =
    Phase1ReadTypescript.Pipeline(
      scope              = TsTreeScope(libName, pedantic = true, Map(), logger),
      libName            = libName,
      expandTypeMappings = Selection.All,
      involvesReact      = true,
    )

  def run(input: String): TsParsedFile = {
    val parsed       = parser(input).get
    val logger       = logging.stringWriter()
    val withCodePath = SetCodePath.visitTsParsedFile(CodePath.HasPath(libName, TsQIdent.empty))(parsed)

    try Transformations(logger.void).foldLeft(withCodePath) { case (acc, f) => f(acc) } catch {
      case NonFatal(th) =>
        println(logger.underlying.toString)
        throw th
    }
  }

  implicit class Extractor(x: TsParsedFile) {
    def extract[T: ClassTag](name: String): T =
      x.membersByName.get(TsIdentSimple(name)) match {
        case Some(ofName) =>
          val ofType = ofName.collectFirst { case x: T => x }
          ofType match {
            case Some(found) => found
            case None        => sys.error(s"Could not find ${implicitly[ClassTag[T]].runtimeClass} among $ofName")
          }
        case None => sys.error(s"No member with name $name among ${x.membersByName.keys}")
      }
  }
} 
Example 59
Source File: ReliableHttpProxyFactory.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.akkahttp.proxy

import akka.NotUsed
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.LoggerFactory
import rhttpc.client.protocol.{Correlated, Request}
import rhttpc.client.proxy._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ReliableHttpProxyFactory {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def send(successRecognizer: SuccessHttpResponseRecognizer, batchSize: Int, parallelConsumers: Int)
          (request: Request[HttpRequest])
          (implicit actorSystem: ActorSystem, materialize: Materializer): Future[HttpResponse] = {
    import actorSystem.dispatcher
    send(prepareHttpFlow(batchSize * parallelConsumers), successRecognizer)(request.correlated)
  }

  private def prepareHttpFlow(parallelism: Int)
                             (implicit actorSystem: ActorSystem, materialize: Materializer):
    Flow[(HttpRequest, String), HttpResponse, NotUsed] = {

    import actorSystem.dispatcher
    Http().superPool[String]().mapAsync(parallelism) {
      case (tryResponse, id) =>
        tryResponse match {
          case Success(response) =>
            response.toStrict(1 minute)
          case Failure(ex) =>
            Future.failed(ex)
        }
    }
  }

  private def send(httpFlow: Flow[(HttpRequest, String), HttpResponse, Any], successRecognizer: SuccessHttpResponseRecognizer)
                  (corr: Correlated[HttpRequest])
                  (implicit ec: ExecutionContext, materialize: Materializer): Future[HttpResponse] = {
    import collection.JavaConverters._
    logger.debug(
      s"""Sending request for ${corr.correlationId} to ${corr.msg.getUri()}. Headers:
         |${corr.msg.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${corr.msg.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
    val logResp = logResponse(corr) _
    val responseFuture = Source.single((corr.msg, corr.correlationId)).via(httpFlow).runWith(Sink.head)
    responseFuture.onComplete {
      case Failure(ex) =>
        logger.error(s"Got failure for ${corr.correlationId} to ${corr.msg.getUri()}", ex)
      case Success(_) =>
    }
    for {
      response <- responseFuture
      transformedToFailureIfNeed <- {
        if (successRecognizer.isSuccess(response)) {
          logResp(response, "success response")
          Future.successful(response)
        } else {
          logResp(response, "response recognized as non-success")
          Future.failed(NonSuccessResponse)
        }
      }
    } yield transformedToFailureIfNeed
  }

  private def logResponse(corr: Correlated[HttpRequest])
                         (response: HttpResponse, additionalInfo: String): Unit = {
    import collection.JavaConverters._
    logger.debug(
      s"""Got $additionalInfo for ${corr.correlationId} to ${corr.msg.getUri()}. Status: ${response.status.value}. Headers:
         |${response.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${response.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
  }

} 
Example 60
Source File: QueueActor.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.pattern._
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash}
import akka.routing.{RoundRobinRoutingLogic, Routee, Router}
import akka.util.Timeout
import rhttpc.transport.{Message, RejectingMessage}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private class QueueActor(consumeTimeout: FiniteDuration,
                         retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging {

  import context.dispatcher

  private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType]

  private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty)

  override def receive: Receive = {
    case RegisterConsumer(consumer, fullMessage) =>
      val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage)
      consumers += consumer -> routee
      router = router.addRoutee(routee)
      log.debug(s"${self.path.name}: registered consumer, unstashing")
      unstashAll()
    case UnregisterConsumer(consumer) =>
      log.debug(s"${self.path.name}: unregistered consumer")
      consumers.get(consumer).foreach { routee =>
        consumers -= consumer
        router = router.removeRoutee(routee)
      }
      sender() ! ((): Unit)
    case msg: Message[_] =>
      if (consumers.isEmpty) {
        log.debug(s"${self.path.name}: got message when no consumer registered, stashing")
        stash()
        implicit val timeout = Timeout(consumeTimeout)
        sender() ! ((): Unit)
      } else {
        router.route(msg, sender())
      }
  }

  private def handleResponse(future: Future[Any], msg: Message[_]): Unit =
    future.recover {
      case ex: AskTimeoutException =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout")
      case ex: Exception with RejectingMessage =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure")
      case NonFatal(ex) =>
        log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure")
        context.system.scheduler.scheduleOnce(retryDelay, self, msg)
    }

}

object QueueActor {
  def props(consumeTimeout: FiniteDuration,
            retryDelay: FiniteDuration): Props = Props(
    new QueueActor(
      consumeTimeout = consumeTimeout,
      retryDelay = retryDelay))
}

private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef,
                                                                       askTimeout: FiniteDuration,
                                                                       handleResponse: (Future[Any], Message[_]) => Unit,
                                                                       fullMessage: Boolean)
  extends Routee {

  override def send(message: Any, sender: ActorRef): Unit = {
    val typedMessage = message.asInstanceOf[Message[_]]
    val msgToSend = if (fullMessage) message else typedMessage.content
    handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage)
  }
}

private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean)

private[inmem] case class UnregisterConsumer(consumer: ActorRef) 
Example 61
Source File: TransportActor.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.actor.{Actor, Props, Status}

import scala.util.control.NonFatal

private class TransportActor(queueActorProps: => Props) extends Actor {

  override def receive: Receive = {
    case GetOrCreateQueue(name) =>
      try {
        val ref = context.child(name).getOrElse(context.actorOf(queueActorProps, name))
        sender() ! ref
      } catch {
        case NonFatal(ex) =>
          sender() ! Status.Failure(ex)
      }
  }

}

object TransportActor {
  def props(queueActorProps: => Props): Props =
    Props(new TransportActor(queueActorProps))
}

private[inmem] case class GetOrCreateQueue(name: String) 
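
Because TransportActor answers errors with Status.Failure, an ask from a caller turns actor-side creation failures into a failed Future. A hedged usage sketch, written inside rhttpc.transport.inmem since the messages are package-private:

package rhttpc.transport.inmem

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.Future
import scala.concurrent.duration._

object TransportActorUsage extends App {
  implicit val system: ActorSystem = ActorSystem("transport-demo")
  implicit val timeout: Timeout = Timeout(5.seconds)

  val transport = system.actorOf(TransportActor.props(QueueActor.props(10.seconds, 1.second)))

  // A Status.Failure reply fails this Future; a successful reply is the queue's ActorRef.
  val queue: Future[ActorRef] = (transport ? GetOrCreateQueue("responses")).mapTo[ActorRef]
}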
Example 62
Source File: AmqpJdbcScheduler.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqpjdbc

import akka.actor.{Cancellable, Scheduler}
import org.slf4j.LoggerFactory
import rhttpc.transport.SerializingPublisher.SerializedMessage
import rhttpc.transport._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

private[amqpjdbc] trait AmqpJdbcScheduler[PubMsg] {

  def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit]

  def start(): Unit

  def stop(): Future[Unit]

}

private[amqpjdbc] class AmqpJdbcSchedulerImpl[PubMsg](scheduler: Scheduler,
                                                      checkInterval: FiniteDuration,
                                                      repo: ScheduledMessagesRepository,
                                                      queueName: String,
                                                      batchSize: Int,
                                                      publisher: SerializingPublisher[PubMsg])
                                                     (implicit ec: ExecutionContext,
                                                      serializer: Serializer[PubMsg]) extends AmqpJdbcScheduler[PubMsg] {
  private val logger = LoggerFactory.getLogger(getClass)

  private var ran: Boolean = false
  private var scheduledCheck: Option[Cancellable] = None
  private var currentPublishedFetchedFuture: Future[Int] = Future.successful(0)

  override def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit] = {
    val serialized = serializer.serialize(msg.content)
    repo.save(MessageToSchedule(queueName, serialized, msg.properties, delay))
  }

  override def start(): Unit = {
    synchronized {
      if (!ran) {
        ran = true
        publishFetchedMessagesThanReschedule()
      }
    }
  }

  private def publishFetchedMessagesThanReschedule(): Unit = {
    synchronized {
      if (ran) {
        val publishedFetchedFuture = repo.fetchMessagesShouldByRun(queueName, batchSize)(publish)
        currentPublishedFetchedFuture = publishedFetchedFuture
        publishedFetchedFuture onComplete handlePublicationResult
      }
    }
  }

  private def publish(messages: Seq[ScheduledMessage]): Future[Seq[Unit]] = {
    if (messages.nonEmpty) {
      logger.debug(s"Fetched ${messages.size}, publishing")
    }
    val handlingFutures = messages.map { message =>
      publisher.publishSerialized(SerializedMessage(message.content.getBytes(), message.properties))
    }
    Future.sequence(handlingFutures)
  }

  private def handlePublicationResult(tryResult: Try[Int]): Unit = {
    tryResult match {
      case Failure(ex) =>
        logger.error("Exception while publishing fetched messages", ex)
      case _ =>
    }
    synchronized {
      if (ran) {
        scheduledCheck = Some(scheduler.scheduleOnce(checkInterval)(publishFetchedMessagesThanReschedule()))
      } else {
        logger.debug(s"Scheduler is stopping, next check will be skipped")
      }
    }
  }

  override def stop(): Future[Unit] = {
    synchronized {
      scheduledCheck.foreach(_.cancel())
      ran = false
      currentPublishedFetchedFuture.map(_ => ())
    }
  }

} 
Example 63
Source File: Recovered.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.utils

import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object Recovered {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def recovered(action: String, run: => Unit): Unit = {
    try {
      run
    } catch {
      case NonFatal(ex) =>
        logger.error(s"Exception while $action", ex)

    }
  }

  def recoveredFuture(action: String, future: => Future[Unit])
                     (implicit ec: ExecutionContext): Future[Unit] = {
    try {
      future.recover {
        case NonFatal(ex) =>
          logger.error(s"Exception while $action", ex)
      }
    } catch {
      case NonFatal(ex) => // while preparing future
        logger.error(s"Exception while $action", ex)
        Future.successful(())
    }
  }

} 
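
A hedged usage sketch of recoveredFuture: chaining shutdown steps so that a non-fatal failure in one step is logged and swallowed instead of short-circuiting the rest, which is the same pattern FallbackPublisher.stop and MessageConsumer.stop use below:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

import rhttpc.utils.Recovered._

object RecoveredUsage extends App {
  // Both steps run; the first failure is only logged by Recovered.
  val shutdown: Future[Unit] =
    recoveredFuture("stopping main publisher", Future.failed(new RuntimeException("boom")))
      .flatMap(_ => recoveredFuture("stopping fallback publisher", Future.successful(())))
}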
Example 64
Source File: FallbackPublisher.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.fallback

import akka.actor.{ActorSystem, Scheduler}
import akka.pattern.CircuitBreaker
import org.slf4j.LoggerFactory
import rhttpc.transport.{Message, Publisher}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private[fallback] class FallbackPublisher[Msg](main: Publisher[Msg],
                                               fallback: Publisher[Msg])
                                              (maxFailures: Int,
                                               callTimeout: FiniteDuration,
                                               resetTimeout: FiniteDuration)
                                              (implicit system: ActorSystem) extends Publisher[Msg] {

  import system.dispatcher

  private val logger = LoggerFactory.getLogger(getClass)

  private val circuitBreaker = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
    .onOpen(logger.debug("Circuit opened"))
    .onHalfOpen(logger.debug("Circuit half-opened"))
    .onClose(logger.debug("Circuit closed"))

  override def publish(msg: Message[Msg]): Future[Unit] = {
    circuitBreaker.withCircuitBreaker(main.publish(msg)).recoverWith {
      case NonFatal(ex) =>
        logger.debug(s"Circuit is opened, sending message [${msg.getClass.getName}] to fallback transport")
        fallback.publish(msg)
    }
  }

  override def start(): Unit = {
    main.start()
    fallback.start()
  }

  override def stop(): Future[Unit] = {
    import rhttpc.utils.Recovered._
    recoveredFuture("stopping main publisher", main.stop())
      .flatMap(_ => recoveredFuture("stopping fallback publisher", fallback.stop()))
  }
} 
Example 65
Source File: HttpProbe.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.test

import com.ning.http.client.{AsyncHttpClient, AsyncHttpClientConfig}
import dispatch._
import dispatch.Defaults.timer

import scala.concurrent._
import scala.concurrent.duration._
import scala.util.control.NonFatal

case class HttpProbe(urlStr: String) {
  private val httpClient = new Http(new AsyncHttpClient(
    new AsyncHttpClientConfig.Builder()
      .setConnectTimeout(500)
      .setRequestTimeout(500)
      .build()))

  def await(atMostSeconds: Int = 15)(implicit ec: ExecutionContext) = {
    val future = retry.Pause(max = atMostSeconds * 2) { () => // default delay is 0.5 sec
      httpClient(url(urlStr) OK as.String).map(Some(_)).recover {
        case NonFatal(ex) => None
      }
    }
    Await.result(future, atMostSeconds seconds)
  }

} 
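
A hedged usage sketch from an integration test: poll a health endpoint until it answers 200 OK, getting Some(body) on success and None if every retry failed (the URL is an assumption):

import scala.concurrent.ExecutionContext.Implicits.global

import rhttpc.test.HttpProbe

object HttpProbeUsage extends App {
  // Retries roughly every 0.5s for up to 30 seconds.
  val body: Option[String] = HttpProbe("http://localhost:8080/healthcheck").await(atMostSeconds = 30)
  println(body.fold("service never came up")(b => s"service answered: $b"))
}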
Example 66
Source File: MessageConsumer.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.client.consume

import akka.actor._
import akka.pattern._
import rhttpc.utils.Recovered
import Recovered._
import rhttpc.client._
import rhttpc.client.config.ConfigParser
import rhttpc.client.protocol.{Correlated, Exchange}
import rhttpc.transport.{Deserializer, InboundQueueData, PubSubTransport, Subscriber}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal

class MessageConsumer[Request, Response](subscriberForConsumer: ActorRef => Subscriber[Correlated[Exchange[Request, Response]]],
                                         handleMessage: Exchange[Request, Response] => Future[Unit])
                                        (implicit actorSystem: ActorSystem){

  private val consumingActor = actorSystem.actorOf(Props(new Actor {
    import context.dispatcher

    override def receive: Receive = {
      case correlated: Correlated[_] =>
        try {
          handleMessage(correlated.asInstanceOf[Correlated[Exchange[Request, Response]]].msg) pipeTo sender()
        } catch {
          case NonFatal(ex) =>
            sender() ! Status.Failure(ex)
        }
    }
    
  }))


  private val subscriber = subscriberForConsumer(consumingActor)

  def start() {
    subscriber.start()
  }

  def stop(): Future[Unit] = {
    import actorSystem.dispatcher
    recoveredFuture("stopping message subscriber", subscriber.stop())
      .flatMap(_ => recoveredFuture("stopping message consumer actor", gracefulStop(consumingActor, 30 seconds).map(_ => ())))
  }

}

case class MessageConsumerFactory()(implicit actorSystem: ActorSystem) {
  
  private lazy val config = ConfigParser.parse(actorSystem)
  
  def create[Request, Response](handleMessage: Exchange[Request, Response] => Future[Unit],
                                batchSize: Int = config.batchSize,
                                parallelConsumers: Int = config.parallelConsumers,
                                queuesPrefix: String = config.queuesPrefix)
                               (implicit transport: PubSubTransport,
                                deserializer: Deserializer[Correlated[Exchange[Request, Response]]]): MessageConsumer[Request, Response] = {
    new MessageConsumer(prepareSubscriber(transport, batchSize, parallelConsumers, queuesPrefix), handleMessage)
  }

  private def prepareSubscriber[Request, Response](transport: PubSubTransport,
                                                   batchSize: Int,
                                                   parallelConsumers: Int,
                                                   queuesPrefix: String)
                                                  (implicit actorSystem: ActorSystem,
                                                   deserializer: Deserializer[Correlated[Exchange[Request, Response]]]):
  (ActorRef) => Subscriber[Correlated[Exchange[Request, Response]]] =
    transport.subscriber[Correlated[_]](InboundQueueData(QueuesNaming.prepareResponseQueueName(queuesPrefix), batchSize, parallelConsumers), _)
      .asInstanceOf[Subscriber[Correlated[Exchange[Request, Response]]]]

} 
Example 67
Source File: HdfsWatcher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hdfs

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import io.eels.util.HdfsIterator
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hdfs.client.HdfsAdmin
import org.apache.hadoop.hdfs.inotify.Event

import scala.concurrent.duration._
import scala.util.control.NonFatal

class HdfsWatcher(path: Path, callback: FileCallback)
                 (implicit fs: FileSystem, conf: Configuration) extends Logging {

  private val files = HdfsIterator.remote(fs.listFiles(path, false)).map(_.getPath).toBuffer
  files.foreach(callback.onStart)

  private val executor = Executors.newSingleThreadExecutor()
  private val running = new AtomicBoolean(true)
  private val interval = 5.seconds

  private val admin = new HdfsAdmin(path.toUri, conf)
  private val eventStream = admin.getInotifyEventStream

  executor.submit(new Runnable {
    override def run(): Unit = {
      while (running.get) {
        try {
          Thread.sleep(interval.toMillis)
          val events = eventStream.take
          for (event <- events.getEvents) {
            event match {
              case create: Event.CreateEvent => callback.onCreate(create)
              case append: Event.AppendEvent => callback.onAppend(append)
              case rename: Event.RenameEvent => callback.onRename(rename)
              case close: Event.CloseEvent => callback.onClose(close)
              case _ =>
            }
          }
        } catch {
          case NonFatal(e) => logger.error("Error while polling fs", e)
        }
      }
    }
  })

  def stop(): Unit = {
    running.set(false)
    executor.shutdownNow()
  }
}

trait FileCallback {
  def onStart(path: Path): Unit
  def onClose(close: Event.CloseEvent): Unit
  def onRename(rename: Event.RenameEvent): Unit
  def onAppend(append: Event.AppendEvent): Unit
  def onCreate(path: Event.CreateEvent): Unit
} 
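
A hedged sketch of a FileCallback that simply logs each inotify event; the watched path and the default Configuration are assumptions:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hdfs.inotify.Event

import io.eels.component.hdfs.{FileCallback, HdfsWatcher}

object HdfsWatcherUsage extends App {
  implicit val conf: Configuration = new Configuration()
  implicit val fs: FileSystem = FileSystem.get(conf)

  val watcher = new HdfsWatcher(new Path("/data/incoming"), new FileCallback {
    def onStart(path: Path): Unit = println(s"existing file: $path")
    def onCreate(create: Event.CreateEvent): Unit = println(s"created: ${create.getPath}")
    def onAppend(append: Event.AppendEvent): Unit = println(s"appended: ${append.getPath}")
    def onRename(rename: Event.RenameEvent): Unit = println(s"renamed: ${rename.getSrcPath} -> ${rename.getDstPath}")
    def onClose(close: Event.CloseEvent): Unit = println(s"closed: ${close.getPath}")
  })
}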
Example 68
Source File: RowPartitionFn.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive.partition

import com.sksamuel.exts.Logging
import io.eels.Row
import io.eels.schema.{Partition, PartitionEntry}

import scala.util.control.NonFatal

object RowPartitionFn extends Logging {

  
  def apply(row: Row, partitionKeys: Seq[String]): Partition = {
    require(
      partitionKeys.forall { key => row.schema.fieldNames().contains(key) },
      s"The row schema must include data for all partitions; schema fields=${row.schema.fieldNames()}; expected partitions=$partitionKeys"
    )

    val entries = partitionKeys.map { fieldName =>
      val index = row.schema.indexOf(fieldName)
      try {
        val value = row.values(index)
        require(value != null, s"Partition value cannot be null for $fieldName")
        require(value.toString.trim.nonEmpty, s"Partition value cannot be empty for $fieldName")
        require(!value.toString.contains(" "), s"Values for partitions cannot contain spaces $fieldName=$value (index $index)")
        PartitionEntry(fieldName, value.toString)
      } catch {
        case NonFatal(t) =>
          logger.error(s"Could not get value for partition $fieldName. Row=$row")
          throw t
      }
    }

    Partition(entries)
  }
} 
Example 69
Source File: HivePartitionPublisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.datastream.{Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.metastore.IMetaStoreClient

import scala.util.control.NonFatal


class HivePartitionPublisher(dbName: String,
                             tableName: String,
                             projectionSchema: StructType,
                             partitionKeys: List[String], // partition keys for this table, used to map the partition values back to a map
                             dialect: HiveDialect // used to open up the files to check they exist if checkDataForPartitionOnlySources is true
                            )
                            (implicit fs: FileSystem,
                             client: IMetaStoreClient) extends Publisher[Seq[Row]] with Logging {

  private val config = ConfigFactory.load()

  // if this is true, then we will still check that some files exist for each partition, to avoid
  // a situation where the partitions have been created in the hive metastore, but no actual
  // data has been written using those yet.
  private val partitionPartFileCheck = config.getBoolean("eel.hive.source.checkDataForPartitionOnlySources")
  logger.info(s"eel.hive.source.checkDataForPartitionOnlySources=$partitionPartFileCheck")

  // returns true if the partition exists on disk
  private def isPartitionPhysical(part: org.apache.hadoop.hive.metastore.api.Partition): Boolean = {
    val location = new Path(part.getSd.getLocation)
    logger.debug(s"Checking that partition $location has been created on disk...")
    try {
      val exists = fs.exists(location)
      if (exists) {
        logger.debug("...exists")
      } else {
        logger.debug("...not found")
      }
      exists
    } catch {
      case NonFatal(e) =>
        logger.warn(s"Error reading $location", e)
        false
    }
  }

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = client.synchronized {
    try {

      import scala.collection.JavaConverters._

      // each row will contain just the values from the metastore
      val rows = client.listPartitions(dbName, tableName, Short.MaxValue).asScala.filter { part =>
        !partitionPartFileCheck || isPartitionPhysical(part)
      }.map { part =>
        // the partition values are assumed to be the same order as the supplied partition keys
        // first we build a map of the keys to values, then use that map to return a Row with
        // values in the order set by the fieldNames parameter
        val map = partitionKeys.zip(part.getValues.asScala).toMap
        Row(projectionSchema, projectionSchema.fieldNames.map(map(_)).toVector)
      }

      logger.debug(s"After scanning partitions and files we have ${rows.size} rows")
      subscriber.subscribed(Subscription.empty)
      rows.iterator.grouped(10).foreach(subscriber.next)
      subscriber.completed()
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 70
Source File: Netty3.scala    From roc   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package roc
package postgresql
package transport

import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel._
import org.jboss.netty.handler.codec.frame.FrameDecoder
import scala.util.control.NonFatal

private[roc] final class PacketFrameDecoder extends FrameDecoder {

  override def decode(ctx: ChannelHandlerContext, channel: Channel,
                      buffer: ChannelBuffer): Packet = {
    if(buffer.readableBytes < Packet.HeaderSize) return null

    buffer.markReaderIndex()
    val code = buffer.readByte.toChar
    val length = buffer.readInt 

    if(buffer.readableBytes < (length - 4)) {
      buffer.resetReaderIndex()
      return null
    }

    val body = new Array[Byte](length - 4)
    buffer.readBytes(body)
    Packet(Some(code), BufferReader(body))
  }

}

private[transport] final class PacketWriter extends SimpleChannelDownstreamHandler {
  override def writeRequested(ctx: ChannelHandlerContext, evt: MessageEvent) =
    evt.getMessage match {
      case p: Packet =>
        try {
          val cb = p.toChannelBuffer
          Channels.write(ctx, evt.getFuture, cb, evt.getRemoteAddress)
        } catch {
          case NonFatal(e) =>
            val _ = evt.getFuture.setFailure(new ChannelException(e.getMessage))
        }

      case unknown =>
        val _ = evt.getFuture.setFailure(new ChannelException(
          "Unsupported request type %s".format(unknown.getClass.getName)))
    }
}


private[roc] object PostgresqlClientPipelineFactory extends ChannelPipelineFactory {
  def getPipeline = {
    val pipeline = Channels.pipeline()
    pipeline.addLast("pgPacketDecoder", new PacketFrameDecoder)
    pipeline.addLast("pgPacketWriter", new PacketWriter)
    pipeline
  }
} 
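
A hedged sketch of plugging the pipeline factory into a Netty 3 client bootstrap, written inside the roc package since the factory is private[roc]; host and port are assumptions:

package roc.postgresql.transport

import java.net.InetSocketAddress
import java.util.concurrent.Executors

import org.jboss.netty.bootstrap.ClientBootstrap
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory

object PipelineUsage extends App {
  val bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
    Executors.newCachedThreadPool(), Executors.newCachedThreadPool()))

  // Inbound bytes are framed into Packets by PacketFrameDecoder; outbound
  // Packets are serialized by PacketWriter.
  bootstrap.setPipelineFactory(PostgresqlClientPipelineFactory)
  val connectFuture = bootstrap.connect(new InetSocketAddress("localhost", 5432))
}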
Example 71
Source File: Step1_PrimarySpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.actor.ActorSystem
import org.scalatest.FunSuiteLike
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import akka.testkit.ImplicitSender
import akka.testkit.TestProbe
import scala.concurrent.duration._
import kvstore.Persistence.{ Persisted, Persist }
import kvstore.Replica.OperationFailed
import kvstore.Replicator.{ Snapshot }
import scala.util.Random
import scala.util.control.NonFatal
import org.scalactic.ConversionCheckedTripleEquals

class Step1_PrimarySpec extends TestKit(ActorSystem("Step1PrimarySpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  import Arbiter._

  test("case1: Primary (in isolation) should properly register itself to the provided Arbiter") {
    val arbiter = TestProbe()
    system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")

    arbiter.expectMsg(Join)
  }

  test("case2: Primary (in isolation) should react properly to Insert, Remove, Get") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
    val client = session(primary)

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)

    client.getAndVerify("k1")
    client.setAcked("k1", "v1")
    client.getAndVerify("k1")
    client.getAndVerify("k2")
    client.setAcked("k2", "v2")
    client.getAndVerify("k2")
    client.removeAcked("k1")
    client.getAndVerify("k1")
  }

  
} 
Example 72
Source File: package.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
import scala.language.postfixOps
import scala.io.StdIn
import scala.util._
import scala.util.control.NonFatal
import scala.concurrent._
import scala.concurrent.duration._
import ExecutionContext.Implicits.global
import scala.async.Async.{async, await}


// The example site stripped this file's enclosing declarations. Minimal
// reconstructions (assumed from the usage below) are restored so the
// snippet is self-contained and the braces balance.
package object nodescala {

  trait CancellationToken {
    def isCancelled: Boolean
  }

  trait CancellationTokenSource {
    def cancellationToken: CancellationToken
    def unsubscribe(): Unit
  }

  object CancellationTokenSource {

    def apply() = new CancellationTokenSource {
      val p = Promise[Unit]()
      val cancellationToken = new CancellationToken {
        def isCancelled = p.future.value != None
      }
      def unsubscribe() {
        p.trySuccess(())
      }
    }
  }
} 
Example 73
Source File: NetworkTime.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.utils

import java.net.InetAddress

import com.typesafe.scalalogging.StrictLogging
import encry.utils.NetworkTime.Time
import org.apache.commons.net.ntp.{NTPUDPClient, TimeInfo}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.Left
import scala.util.control.NonFatal

object NetworkTime {
  def localWithOffset(offset: Long): Long = System.currentTimeMillis() + offset

  type Offset = Long
  type Time = Long
}

protected case class NetworkTime(offset: NetworkTime.Offset, lastUpdate: NetworkTime.Time)

case class NetworkTimeProviderSettings(server: String, updateEvery: FiniteDuration, timeout: FiniteDuration)

class NetworkTimeProvider(ntpSettings: NetworkTimeProviderSettings) extends StrictLogging {

  private var state: State = Right(NetworkTime(0L, 0L))
  private var delta: Time = 0L

  private type State = Either[(NetworkTime, Future[NetworkTime]), NetworkTime]

  private def updateOffSet(): Option[NetworkTime.Offset] = {
    val client: NTPUDPClient = new NTPUDPClient()
    client.setDefaultTimeout(ntpSettings.timeout.toMillis.toInt)
    try {
      client.open()
      val info: TimeInfo = client.getTime(InetAddress.getByName(ntpSettings.server))
      info.computeDetails()
      Option(info.getOffset)
    } catch {
      case t: Throwable => None
    } finally {
      client.close()
    }
  }

  private def timeAndState(currentState: State): Future[(NetworkTime.Time, State)] =
    currentState match {
      case Right(nt) =>
        val time: Long = NetworkTime.localWithOffset(nt.offset)
        val state: Either[(NetworkTime, Future[NetworkTime]), NetworkTime] =
          if (time > nt.lastUpdate + ntpSettings.updateEvery.toMillis) {
            Left(nt -> Future(updateOffSet()).map { mbOffset =>
              logger.info("New offset adjusted: " + mbOffset)
              val offset = mbOffset.getOrElse(nt.offset)
              NetworkTime(offset, NetworkTime.localWithOffset(offset))
            })
          } else Right(nt)
        Future.successful((time, state))
      case Left((nt, networkTimeFuture)) =>
        networkTimeFuture
          .map(networkTime => NetworkTime.localWithOffset(networkTime.offset) -> Right(networkTime))
          .recover {
            case NonFatal(th) =>
              logger.warn(s"Failed to evaluate networkTimeFuture $th")
              NetworkTime.localWithOffset(nt.offset) -> Left(nt -> networkTimeFuture)
          }
    }

  def estimatedTime: Time = state match {
    case Right(nt) if NetworkTime.localWithOffset(nt.offset) <= nt.lastUpdate + ntpSettings.updateEvery.toMillis =>
      NetworkTime.localWithOffset(nt.offset)
    case _ => System.currentTimeMillis() + delta
  }

  def time(): Future[NetworkTime.Time] =
    timeAndState(state)
      .map { case (timeFutureResult, stateFutureResult) =>
        state = stateFutureResult
        delta = timeFutureResult - System.currentTimeMillis()
        timeFutureResult
      }

} 
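
A hedged usage sketch: point the provider at a public NTP pool and ask for the network-adjusted time; the server name and intervals are assumptions:

import scala.concurrent.Await
import scala.concurrent.duration._

import encry.utils.{NetworkTimeProvider, NetworkTimeProviderSettings}

object NetworkTimeUsage extends App {
  val provider = new NetworkTimeProvider(
    NetworkTimeProviderSettings(server = "pool.ntp.org", updateEvery = 1.minute, timeout = 5.seconds))

  // Refreshes the NTP offset when the last one is stale; a failed query
  // falls back to local time plus the last known delta.
  val now: Long = Await.result(provider.time(), 10.seconds)
  println(s"network-adjusted time: $now")
}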
Example 74
Source File: GlobalTimer.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.it.util

import io.netty.util.{HashedWheelTimer, Timer}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

object GlobalTimer {

  val timer: Timer = new HashedWheelTimer()
  sys.addShutdownHook {
    timer.stop()
  }

  implicit class TimerExt(val timer: Timer) extends AnyVal {
    def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = {
      val p = Promise[A]
      try {
        timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit)
      } catch {
        case NonFatal(e) => p.failure(e)
      }
      p.future
    }

    def sleep(term: FiniteDuration): Future[Unit] = schedule(Future.successful(()), term)

    def retryUntil[A](f: => Future[A], cond: A => Boolean, retryInterval: FiniteDuration)(implicit ec: ExecutionContext): Future[A] =
      f.flatMap(v => if (cond(v)) Future.successful(v) else schedule(retryUntil(f, cond, retryInterval), retryInterval))
  }
} 
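
A hedged usage sketch of retryUntil: poll a stand-in currentHeight query every two seconds until it reaches a target, without blocking a thread between attempts:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._

import encry.it.util.GlobalTimer._

object RetryUntilUsage extends App {
  // currentHeight is a hypothetical stand-in for a real node query; it is
  // re-evaluated on every retry because retryUntil takes its argument by name.
  def currentHeight(): Future[Int] = Future.successful(scala.util.Random.nextInt(12))

  val reachedTen: Future[Int] = timer.retryUntil(currentHeight(), (h: Int) => h >= 10, 2.seconds)
}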
Example 75
Source File: AsyncValidationRules.scala    From octopus   with Apache License 2.0 5 votes vote down vote up
package octopus

import octopus.AsyncValidatorM.instance
import shapeless.{::, Generic, HNil}

import scala.language.higherKinds
import scala.reflect.ClassTag
import scala.util.control.NonFatal

object AsyncValidationRules extends Serializable {

  def rule[M[_]: AppError, T](asyncPred: T => M[Boolean], whenInvalid: String): AsyncValidatorM[M, T] =
    instance { obj: T =>
      AppError[M].map(asyncPred(obj))(if(_) Nil else List(ValidationError(whenInvalid)))
    }

  def ruleVC[M[_]: AppError, T, V](asyncPred: V => M[Boolean], whenInvalid: String)
                                  (implicit gen: Generic.Aux[T, V :: HNil]): AsyncValidatorM[M, T] =
    instance { obj: T =>
      rule[M, V](asyncPred, whenInvalid)
        .validate(gen.to(obj).head)
    }

  def ruleCatchOnly[M[_]: AppError, T, E <: Throwable : ClassTag](asyncPred: T => M[Boolean],
                                                                  whenInvalid: String,
                                                                  whenCaught: E => String): AsyncValidatorM[M, T] =
    instance { obj: T =>
      AppError[M].recover(
        try {
          rule(asyncPred, whenInvalid).validate(obj)
        } catch {
          case NonFatal(ex) =>
            AppError[M].failed(ex)
        },
        {
          case ex if implicitly[ClassTag[E]].runtimeClass.isInstance(ex) =>
            List(ValidationError(whenCaught(ex.asInstanceOf[E])))
        }
      )
    }

  def ruleCatchNonFatal[M[_]: AppError, T](asyncPred: T => M[Boolean],
                                           whenInvalid: String,
                                           whenCaught: Throwable => String): AsyncValidatorM[M, T] =
    instance { obj: T =>
      AppError[M].recover(
        try {
          rule(asyncPred, whenInvalid).validate(obj)
        } catch {
          case NonFatal(ex) =>
            AppError[M].failed(ex)
        },
        {
          case NonFatal(ex) =>
            List(ValidationError(whenCaught(ex)))
        }
      )
    }

  def ruleEither[M[_]: AppError, T](asyncPred: T => M[Either[String, Boolean]],
                                    whenInvalid: String): AsyncValidatorM[M, T] =
    instance { obj: T =>
      AppError[M].map(asyncPred(obj)) {
        case Right(true) => Nil
        case Right(false) => List(ValidationError(whenInvalid))
        case Left(why) => List(ValidationError(why))
      }
    }

  def ruleOption[M[_]: AppError, T](asyncPred: T => M[Option[Boolean]],
                                    whenInvalid: String,
                                    whenNone: String): AsyncValidatorM[M, T] =
    instance { obj: T =>
      AppError[M].map(asyncPred(obj)) {
        case Some(true) => Nil
        case Some(false) => List(ValidationError(whenInvalid))
        case None => List(ValidationError(whenNone))
      }
    }
} 
Example 76
Source File: ValidationRules.scala    From octopus   with Apache License 2.0 5 votes vote down vote up
package octopus

import shapeless.{::, Generic, HNil}

import scala.reflect.ClassTag
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

object ValidationRules extends Serializable {

  def rule[T](pred: T => Boolean, whenInvalid: String): Validator[T] =
    (obj: T) => if (pred(obj)) Nil else List(ValidationError(whenInvalid))

  def ruleVC[T, V](pred: V => Boolean, whenInvalid: String)
                  (implicit gen: Generic.Aux[T, V :: HNil]): Validator[T] =
    (obj: T) => rule[V](pred, whenInvalid)
      .validate(gen.to(obj).head)

  def ruleCatchOnly[T, E <: Throwable : ClassTag](pred: T => Boolean,
                                                  whenInvalid: String,
                                                  whenCaught: E => String): Validator[T] =
    (obj: T) => try {
      rule(pred, whenInvalid).validate(obj)
    } catch {
      case ex if implicitly[ClassTag[E]].runtimeClass.isInstance(ex) =>
        List(ValidationError(whenCaught(ex.asInstanceOf[E])))
    }

  def ruleCatchNonFatal[T](pred: T => Boolean,
                           whenInvalid: String,
                           whenCaught: Throwable => String): Validator[T] =
    (obj: T) => try {
      rule(pred, whenInvalid).validate(obj)
    } catch {
      case NonFatal(ex) =>
        List(ValidationError(whenCaught(ex)))
    }

  def ruleTry[T](pred: T => Try[Boolean],
                 whenInvalid: String,
                 whenFailure: Throwable => String): Validator[T] =
    (obj: T) => pred(obj) match {
      case Success(true) => Nil
      case Success(false) => List(ValidationError(whenInvalid))
      case Failure(why) => List(ValidationError(whenFailure(why)))
    }

  def ruleEither[T](pred: T => Either[String, Boolean],
                    whenInvalid: String): Validator[T] =
    (obj: T) => pred(obj) match {
      case Right(true) => Nil
      case Right(false) => List(ValidationError(whenInvalid))
      case Left(why) => List(ValidationError(why))
    }

  def ruleOption[T](pred: T => Option[Boolean],
                    whenInvalid: String,
                    whenNone: String): Validator[T] =
    (obj: T) => pred(obj) match {
      case Some(true) => Nil
      case Some(false) => List(ValidationError(whenInvalid))
      case None => List(ValidationError(whenNone))
    }
} 
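
A hedged usage sketch combining a plain rule with ruleCatchNonFatal, which turns a parser exception into a ValidationError instead of letting it escape; the Site class is an assumption:

import java.net.URL

import octopus.ValidationRules._

object ValidationRulesUsage extends App {
  case class Site(url: String)

  val nonEmpty = rule[Site](_.url.nonEmpty, "url must not be empty")
  val wellFormed = ruleCatchNonFatal[Site](
    site => { new URL(site.url); true },
    "url must be well-formed",
    ex => s"url could not be parsed: ${ex.getMessage}")

  println(nonEmpty.validate(Site("")))            // List(ValidationError(url must not be empty))
  println(wellFormed.validate(Site("no-scheme"))) // the MalformedURLException surfaces as a ValidationError
}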
Example 77
Source File: RedisInputDStream.scala    From spark-redis   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.redislabs.provider.redis.streaming

import com.redislabs.provider.redis.RedisConfig
import org.apache.curator.utils.ThreadUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.dstream.ReceiverInputDStream

import redis.clients.jedis._

import scala.reflect.{ClassTag, classTag}
import scala.util.control.NonFatal


      keys.foreach{ key =>
        executorPool.submit(new MessageHandler(redisConfig.connectionForKey(key), key))
      }
    } finally {
      executorPool.shutdown()
    }
  }

  def onStop() {
  }

  private class MessageHandler(conn: Jedis, key: String) extends Runnable {
    def run() {
      try {
        while(!isStopped) {
          val response = conn.blpop(2, key)
          if (response == null || response.isEmpty) {
            // no-op
          } else if (classTag[T] == classTag[String]) {
            store(response.get(1).asInstanceOf[T])
          } else if (classTag[T] == classTag[(String, String)]) {
            store((response.get(0), response.get(1)).asInstanceOf[T])
          } else {
            throw new scala.Exception("Unknown Redis Streaming type")
          }
        }
      } catch {
        case NonFatal(e) =>
          restart("Error receiving data", e)
      } finally {
        onStop()
      }
    }
  }
} 
Example 78
Source File: TryWith.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.util

import scala.util.control.NonFatal
import scala.util.{Failure, Try}

object TryWith {

  def apply[C <: AutoCloseable, R](resource: => C)(f: C => R): Try[R] =
    Try(resource).flatMap(resourceInstance => {
      try {
        val returnValue = f(resourceInstance)
        Try(resourceInstance.close()).map(_ => returnValue)
      } catch {
        case NonFatal(exceptionInFunction) =>
          try {
            resourceInstance.close()
            Failure(exceptionInFunction)
          } catch {
            case NonFatal(exceptionInClose) =>
              exceptionInFunction.addSuppressed(exceptionInClose)
              Failure(exceptionInFunction)
          }
      }
    })
} 
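
A hedged usage sketch: the reader is closed whether or not readLine throws, and a close-time exception is attached to the primary failure as suppressed:

import java.io.{BufferedReader, FileReader}

import scala.util.Try

import hydra.common.util.TryWith

object TryWithUsage extends App {
  def firstLine(path: String): Try[String] =
    TryWith(new BufferedReader(new FileReader(path)))(_.readLine())

  println(firstLine("/etc/hostname"))
}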
Example 79
Source File: InitializingActor.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.akka

import akka.actor.{Actor, ActorRef, ReceiveTimeout, Stash}
import akka.pattern.pipe
import hydra.common.config.ActorConfigSupport
import hydra.common.logging.LoggingAdapter
import hydra.core.HydraException
import hydra.core.akka.InitializingActor.{InitializationError, Initialized}
import hydra.core.protocol.HydraMessage
import retry.Success

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal

trait InitializingActor
    extends Actor
    with ActorConfigSupport
    with Stash
    with LoggingAdapter {

  
  def initializationError(ex: Throwable): Receive
}

object InitializingActor {

  case object Initialized extends HydraMessage

  case class InitializationError(cause: Throwable) extends HydraMessage

}

@SerialVersionUID(1L)
class ActorInitializationException(
    ingestor: ActorRef,
    message: String,
    cause: Throwable
) extends HydraException(
      ActorInitializationException.enrichedMessage(ingestor, message),
      cause
    ) {
  def getActor: ActorRef = ingestor
}

object ActorInitializationException {

  private def enrichedMessage(actor: ActorRef, message: String) =
    Option(actor).map(a => s"${a.path}: $message").getOrElse(message)

  private[hydra] def apply(
      actor: ActorRef,
      message: String,
      cause: Throwable = null
  ) =
    new ActorInitializationException(actor, message, cause)

  def unapply(
      ex: ActorInitializationException
  ): Option[(ActorRef, String, Throwable)] =
    Some((ex.getActor, ex.getMessage, ex.getCause))
} 
Example 80
Source File: DefaultParameters.scala    From mango   with Apache License 2.0 5 votes vote down vote up
package com.kakao.mango.reflect

import java.lang.reflect.{Method, Constructor}

import scala.reflect.runtime.universe._
import scala.util.control.NonFatal


// The enclosing object declaration was dropped by the example site; it is
// restored here so the braces below balance.
object DefaultParameters {

  def apply(obj: AnyRef, method: Method): Array[AnyRef] = {
    val clazz = method.getDeclaringClass
    val name = method.getName

    val count = method.getParameterTypes.length
    val result = new Array[AnyRef](count)

    try {
      for (i <- 0 until count) {
        util.Try(clazz.getMethod(s"$name$$default$$${i+1}")).foreach { method =>
          result(i) = method.invoke(obj)
        }
      }
    } catch {
      case NonFatal(e) => // if there is no default parameters, return the array with null entries
    }

    result
  }

} 
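
A hedged usage sketch: scalac emits a synthetic greet$default$2 accessor for the retries default, which is exactly what the loop above probes via reflection; the Greeter class is an assumption:

import com.kakao.mango.reflect.DefaultParameters

object DefaultParametersUsage extends App {
  class Greeter {
    def greet(name: String, retries: Int = 3): String = s"hello $name ($retries)"
  }

  val greeter = new Greeter
  val method = classOf[Greeter].getMethod("greet", classOf[String], classOf[Int])

  // Positions without a default stay null; retries comes back as boxed 3.
  val defaults = DefaultParameters(greeter, method)
  println(defaults.mkString("[", ", ", "]")) // [null, 3]
}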
Example 81
Source File: CreateHiveTableAsSelectCommand.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.execution

import scala.util.control.NonFatal

import org.apache.spark.sql.{AnalysisException, Row, SaveMode, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.DataWritingCommand



case class CreateHiveTableAsSelectCommand(
    tableDesc: CatalogTable,
    query: LogicalPlan,
    outputColumnNames: Seq[String],
    mode: SaveMode)
  extends DataWritingCommand {

  private val tableIdentifier = tableDesc.identifier

  override def run(sparkSession: SparkSession, child: SparkPlan): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    if (catalog.tableExists(tableIdentifier)) {
      assert(mode != SaveMode.Overwrite,
        s"Expect the table $tableIdentifier has been dropped when the save mode is Overwrite")

      if (mode == SaveMode.ErrorIfExists) {
        throw new AnalysisException(s"$tableIdentifier already exists.")
      }
      if (mode == SaveMode.Ignore) {
        // Since the table already exists and the save mode is Ignore, we will just return.
        return Seq.empty
      }

      // For CTAS, there is no static partition values to insert.
      val partition = tableDesc.partitionColumnNames.map(_ -> None).toMap
      InsertIntoHiveTable(
        tableDesc,
        partition,
        query,
        overwrite = false,
        ifPartitionNotExists = false,
        outputColumnNames = outputColumnNames).run(sparkSession, child)
    } else {
      // TODO ideally, we should get the output data ready first and then
      // add the relation into catalog, just in case of failure occurs while data
      // processing.
      assert(tableDesc.schema.isEmpty)
      catalog.createTable(
        tableDesc.copy(schema = outputColumns.toStructType), ignoreIfExists = false)

      try {
        // Read back the metadata of the table which was created just now.
        val createdTableMeta = catalog.getTableMetadata(tableDesc.identifier)
        // For CTAS, there is no static partition values to insert.
        val partition = createdTableMeta.partitionColumnNames.map(_ -> None).toMap
        InsertIntoHiveTable(
          createdTableMeta,
          partition,
          query,
          overwrite = true,
          ifPartitionNotExists = false,
          outputColumnNames = outputColumnNames).run(sparkSession, child)
      } catch {
        case NonFatal(e) =>
          // drop the created table.
          catalog.dropTable(tableIdentifier, ignoreIfNotExists = true, purge = false)
          throw e
      }
    }

    Seq.empty[Row]
  }

  override def argString: String = {
    s"[Database:${tableDesc.database}, " +
    s"TableName: ${tableDesc.identifier.table}, " +
    s"InsertIntoHiveTable]"
  }
} 
Example 82
Source File: XSQLCreateHiveTableAsSelectCommand.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.xsql.execution.command

import scala.util.control.NonFatal

import org.apache.spark.sql.{AnalysisException, Row, SaveMode, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.DataWritingCommand
import org.apache.spark.sql.xsql.XSQLSessionCatalog


case class XSQLCreateHiveTableAsSelectCommand(
    tableDesc: CatalogTable,
    query: LogicalPlan,
    outputColumnNames: Seq[String],
    mode: SaveMode)
  extends DataWritingCommand {

  override def run(sparkSession: SparkSession, child: SparkPlan): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog.asInstanceOf[XSQLSessionCatalog]
    val tableIdentifier = catalog.getUsedTableIdentifier(tableDesc.identifier)
    val newTableDesc = tableDesc.copy(identifier = tableIdentifier)
    if (catalog.tableExists(tableIdentifier)) {
      assert(
        mode != SaveMode.Overwrite,
        s"Expect the table $tableIdentifier has been dropped when the save mode is Overwrite")

      if (mode == SaveMode.ErrorIfExists) {
        throw new AnalysisException(s"$tableIdentifier already exists.")
      }
      if (mode == SaveMode.Ignore) {
        // Since the table already exists and the save mode is Ignore, we will just return.
        return Seq.empty
      }

      XSQLInsertIntoHiveTable(
        newTableDesc,
        Map.empty,
        query,
        overwrite = false,
        ifPartitionNotExists = false,
        outputColumnNames = outputColumnNames).run(sparkSession, child)
    } else {
      // TODO ideally, we should get the output data ready first and then
      // add the relation into catalog, just in case of failure occurs while data
      // processing.
      assert(newTableDesc.schema.isEmpty)
      catalog.createTable(newTableDesc.copy(schema = query.schema), ignoreIfExists = false)

      try {
        // Read back the metadata of the table which was created just now.
        val createdTableMeta = catalog.getTableMetadata(newTableDesc.identifier)
        // For CTAS, there is no static partition values to insert.
        val partition = createdTableMeta.partitionColumnNames.map(_ -> None).toMap
        XSQLInsertIntoHiveTable(
          createdTableMeta,
          partition,
          query,
          overwrite = true,
          ifPartitionNotExists = false,
          outputColumnNames = outputColumnNames).run(sparkSession, child)
      } catch {
        case NonFatal(e) =>
          // drop the created table.
          catalog.dropTable(tableIdentifier, ignoreIfNotExists = true, purge = false)
          throw e
      }
    }

    Seq.empty[Row]
  }

  override def argString: String = {
    s"[TableName: ${tableDesc.identifier.table}, " +
      s"InsertIntoHiveTable]"
  }
} 
Example 83
Source File: CodeGeneratorWithInterpretedFallback.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.catalyst.expressions

import scala.util.control.NonFatal

import org.apache.spark.internal.Logging
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.Utils


abstract class CodeGeneratorWithInterpretedFallback[IN, OUT] extends Logging {

  def createObject(in: IN): OUT = {
    // We are allowed to choose codegen-only or no-codegen modes if under tests.
    val config = SQLConf.get.getConf(SQLConf.CODEGEN_FACTORY_MODE)
    val fallbackMode = CodegenObjectFactoryMode.withName(config)

    fallbackMode match {
      case CodegenObjectFactoryMode.CODEGEN_ONLY if Utils.isTesting =>
        createCodeGeneratedObject(in)
      case CodegenObjectFactoryMode.NO_CODEGEN if Utils.isTesting =>
        createInterpretedObject(in)
      case _ =>
        try {
          createCodeGeneratedObject(in)
        } catch {
          case NonFatal(_) =>
            // We should have already seen the error message in `CodeGenerator`
            logWarning("Expr codegen error and falling back to interpreter mode")
            createInterpretedObject(in)
        }
    }
  }

  protected def createCodeGeneratedObject(in: IN): OUT
  protected def createInterpretedObject(in: IN): OUT
} 
Example 84
Source File: ResolveInlineTables.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.catalyst.analysis

import scala.util.control.NonFatal

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{StructField, StructType}


// The rule's enclosing declaration and its apply method were elided by the
// example site; they are restored here in simplified form (after the Spark
// source) so the snippet is self-contained.
case class ResolveInlineTables(conf: SQLConf) extends Rule[LogicalPlan] with CastSupport {

  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformUp {
    // The full rule also validates row dimensions and evaluability first.
    case table: UnresolvedInlineTable if table.expressionsResolved => convert(table)
  }

  private[analysis] def convert(table: UnresolvedInlineTable): LocalRelation = {
    // For each column, traverse all the values and find a common data type and nullability.
    val fields = table.rows.transpose.zip(table.names).map { case (column, name) =>
      val inputTypes = column.map(_.dataType)
      val tpe = TypeCoercion.findWiderTypeWithoutStringPromotion(inputTypes).getOrElse {
        table.failAnalysis(s"incompatible types found in column $name for inline table")
      }
      StructField(name, tpe, nullable = column.exists(_.nullable))
    }
    val attributes = StructType(fields).toAttributes
    assert(fields.size == table.names.size)

    val newRows: Seq[InternalRow] = table.rows.map { row =>
      InternalRow.fromSeq(row.zipWithIndex.map { case (e, ci) =>
        val targetType = fields(ci).dataType
        try {
          val castedExpr = if (e.dataType.sameType(targetType)) {
            e
          } else {
            cast(e, targetType)
          }
          castedExpr.eval()
        } catch {
          case NonFatal(ex) =>
            table.failAnalysis(s"failed to evaluate expression ${e.sql}: ${ex.getMessage}", ex)
        }
      })
    }

    LocalRelation(attributes, newRows)
  }
} 
Example 85
Source File: FileStreamSink.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import scala.util.control.NonFatal

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.datasources.{BasicWriteJobStatsTracker, FileFormat, FileFormatWriter}
import org.apache.spark.util.SerializableConfiguration

object FileStreamSink extends Logging {
  // The name of the subdirectory that is used to store metadata about which files are valid.
  val metadataDir = "_spark_metadata"

  // (the rest of the companion object was elided by the example site; its
  // closing brace is restored here so the declarations below parse)
}

class FileStreamSink(
    sparkSession: SparkSession,
    path: String,
    fileFormat: FileFormat,
    partitionColumnNames: Seq[String],
    options: Map[String, String]) extends Sink with Logging {

  private val basePath = new Path(path)
  private val logPath = new Path(basePath, FileStreamSink.metadataDir)
  private val fileLog =
    new FileStreamSinkLog(FileStreamSinkLog.VERSION, sparkSession, logPath.toUri.toString)
  private val hadoopConf = sparkSession.sessionState.newHadoopConf()

  private def basicWriteJobStatsTracker: BasicWriteJobStatsTracker = {
    val serializableHadoopConf = new SerializableConfiguration(hadoopConf)
    new BasicWriteJobStatsTracker(serializableHadoopConf, BasicWriteJobStatsTracker.metrics)
  }

  override def addBatch(batchId: Long, data: DataFrame): Unit = {
    if (batchId <= fileLog.getLatest().map(_._1).getOrElse(-1L)) {
      logInfo(s"Skipping already committed batch $batchId")
    } else {
      val committer = FileCommitProtocol.instantiate(
        className = sparkSession.sessionState.conf.streamingFileCommitProtocolClass,
        jobId = batchId.toString,
        outputPath = path)

      committer match {
        case manifestCommitter: ManifestFileCommitProtocol =>
          manifestCommitter.setupManifestOptions(fileLog, batchId)
        case _ =>  // Do nothing
      }

      // Get the actual partition columns as attributes after matching them by name with
      // the given column names.
      val partitionColumns: Seq[Attribute] = partitionColumnNames.map { col =>
        val nameEquality = data.sparkSession.sessionState.conf.resolver
        data.logicalPlan.output.find(f => nameEquality(f.name, col)).getOrElse {
          throw new RuntimeException(s"Partition column $col not found in schema ${data.schema}")
        }
      }
      val qe = data.queryExecution

      FileFormatWriter.write(
        sparkSession = sparkSession,
        plan = qe.executedPlan,
        fileFormat = fileFormat,
        committer = committer,
        outputSpec = FileFormatWriter.OutputSpec(path, Map.empty, qe.analyzed.output),
        hadoopConf = hadoopConf,
        partitionColumns = partitionColumns,
        bucketSpec = None,
        statsTrackers = Seq(basicWriteJobStatsTracker),
        options = options)
    }
  }

  override def toString: String = s"FileSink[$path]"
} 
Example 86
Source File: WriteToContinuousDataSourceExec.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming.continuous

import scala.util.control.NonFatal

import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.streaming.StreamExecution
import org.apache.spark.sql.sources.v2.writer.streaming.StreamWriter


case class WriteToContinuousDataSourceExec(writer: StreamWriter, query: SparkPlan)
    extends SparkPlan with Logging {
  override def children: Seq[SparkPlan] = Seq(query)
  override def output: Seq[Attribute] = Nil

  override protected def doExecute(): RDD[InternalRow] = {
    val writerFactory = writer.createWriterFactory()
    val rdd = new ContinuousWriteRDD(query.execute(), writerFactory)

    logInfo(s"Start processing data source writer: $writer. " +
      s"The input RDD has ${rdd.partitions.length} partitions.")
    EpochCoordinatorRef.get(
      sparkContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY),
      sparkContext.env)
      .askSync[Unit](SetWriterPartitions(rdd.getNumPartitions))

    try {
      // Force the RDD to run so continuous processing starts; no data is actually being collected
      // to the driver, as ContinuousWriteRDD outputs nothing.
      rdd.collect()
    } catch {
      case _: InterruptedException =>
        // Interruption is how continuous queries are ended, so accept and ignore the exception.
      case cause: Throwable =>
        cause match {
          // Do not wrap interruption exceptions that will be handled by streaming specially.
          case _ if StreamExecution.isInterruptionException(cause) => throw cause
          // Only wrap non fatal exceptions.
          case NonFatal(e) => throw new SparkException("Writing job aborted.", e)
          case _ => throw cause
        }
    }

    sparkContext.emptyRDD
  }
} 
Example 87
Source File: StreamMetadata.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import java.util.ConcurrentModificationException

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileAlreadyExistsException, FSDataInputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.streaming.CheckpointFileManager.CancellableFSDataOutputStream
import org.apache.spark.sql.streaming.StreamingQuery


object StreamMetadata extends Logging {
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: CancellableFSDataOutputStream = null
    try {
      val fileManager = CheckpointFileManager.create(metadataFile.getParent, hadoopConf)
      output = fileManager.createAtomic(metadataFile, overwriteIfPossible = false)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case e: FileAlreadyExistsException =>
        if (output != null) {
          output.cancel()
        }
        throw new ConcurrentModificationException(
          s"Multiple streaming queries are concurrently using $metadataFile", e)
      case e: Throwable =>
        if (output != null) {
          output.cancel()
        }
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    }
  }
} 
Example 88
Source File: JsonParserSettings.scala    From play-json   with Apache License 2.0 5 votes vote down vote up
package play.api.libs.json

import java.math.MathContext

import scala.util.control.NonFatal


object JsonParserSettings {
  val settings: JsonParserSettings = {
    // Initialize the parser settings from System properties. This way it is possible
    // for users to easily replace the default values.
    val scaleLimit  = parseNum("play.json.parser.scaleLimit", defaultScaleLimit)(_.toInt)
    val digitsLimit = parseNum("play.json.parser.digitsLimit", defaultDigitsLimit)(_.toInt)
    val mathContext = parseMathContext("play.json.parser.mathContext")

    val minPlain: BigDecimal = parseNum("play.json.serializer.minPlain", MinPlain)(BigDecimal.exact)
    val maxPlain: BigDecimal = parseNum("play.json.serializer.maxPlain", MaxPlain)(BigDecimal.exact)

    JsonParserSettings(
      BigDecimalParseSettings(
        mathContext,
        scaleLimit,
        digitsLimit
      ),
      BigDecimalSerializerSettings(
        minPlain,
        maxPlain
      )
    )
  }

  private def parseMathContext(key: String): MathContext = sys.props.get(key).map(_.toLowerCase) match {
    case Some("decimal128") => MathContext.DECIMAL128
    case Some("decimal64")  => MathContext.DECIMAL64
    case Some("decimal32")  => MathContext.DECIMAL32
    case Some("unlimited")  => MathContext.UNLIMITED
    case _                  => defaultMathContext
  }

  private def parseNum[T](key: String, default: T)(f: String => T): T =
    try {
      sys.props.get(key).map(f).getOrElse(default)
    } catch {
      case NonFatal(_) => default
    }
} 
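
The `parseNum` helper above is a reusable shape: read a configuration value from a system property and fall back to a default when it is absent or malformed. A sketch of the same shape (the object, helper name, and property key are hypothetical):

import scala.util.control.NonFatal

object PropDefaults {
  // Read a system property, falling back to the default when the property
  // is absent or malformed. "my.app.scaleLimit" is an illustrative key.
  def propOr[T](key: String, default: T)(parse: String => T): T =
    try sys.props.get(key).map(parse).getOrElse(default)
    catch { case NonFatal(_) => default }

  val scaleLimit: Int = propOr("my.app.scaleLimit", 100)(_.toInt)
}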
Example 89
Source File: SQLContextExtensionBase.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.extension

import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{ParserDialect, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry, SimpleFunctionRegistry}
import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.DDLParser
import org.apache.spark.sql.extension.OptimizerFactory.ExtendableOptimizerBatch
import org.apache.spark.util.Utils

import scala.util.Try
import scala.util.control.NonFatal


  override protected def extendedParserDialect: ParserDialect =
    try {
      val clazz = Utils.classForName(dialectClassName)
      clazz.newInstance().asInstanceOf[ParserDialect]
    } catch {
      case NonFatal(e) =>
        // Since we didn't find the available SQL Dialect, it will fail even for SET command:
        // SET spark.sql.dialect=sql; Let's reset as default dialect automatically.
        val dialect = conf.dialect
        // reset the sql dialect
        conf.unsetConf(SQLConf.DIALECT)
        // throw out the exception, and the default sql dialect will take effect for next query.
        throw new DialectException(
          s"""
              |Instantiating dialect '$dialect' failed.
              |Reverting to default dialect '${conf.dialect}'""".stripMargin, e)
    }

  // (suggestion) make this implicit to FunctionRegistry.
  protected def registerBuiltins(registry: FunctionRegistry): Unit = {
    FunctionRegistry.expressions.foreach {
      case (name, (info, builder)) => registry.registerFunction(name, builder)
    }
  }

  override protected def extendedDdlParser(parser: String => LogicalPlan): DDLParser =
    new DDLParser(sqlParser.parse(_))

  override protected def registerFunctions(registry: FunctionRegistry): Unit = { }

} 
Example 90
Source File: ERPCurrencyConversionExpression.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.currency.erp

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.expressions.{Expression, ImplicitCastInputTypes}
import org.apache.spark.sql.currency.CurrencyConversionException
import org.apache.spark.sql.currency.erp.ERPConversionLoader.RConversionOptionsCurried
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String

import scala.util.control.NonFatal



case class ERPCurrencyConversionExpression(
    conversionFunction: RConversionOptionsCurried,
    children: Seq[Expression])
  extends Expression
  with ImplicitCastInputTypes
  with CodegenFallback {

  protected val CLIENT_INDEX = 0
  protected val CONVERSION_TYPE_INDEX = 1
  protected val AMOUNT_INDEX = 2
  protected val FROM_INDEX = 3
  protected val TO_INDEX = 4
  protected val DATE_INDEX = 5
  protected val NUM_ARGS = 6

  protected val errorMessage = "Currency conversion library encountered an internal error"


  override def eval(input: InternalRow): Any = {
    val inputArguments = children.map(_.eval(input))

    require(inputArguments.length == NUM_ARGS, "wrong number of arguments")

    // parse arguments
    val client = Option(inputArguments(CLIENT_INDEX).asInstanceOf[UTF8String]).map(_.toString)
    val conversionType =
      Option(inputArguments(CONVERSION_TYPE_INDEX).asInstanceOf[UTF8String]).map(_.toString)
    val amount = Option(inputArguments(AMOUNT_INDEX).asInstanceOf[Decimal].toJavaBigDecimal)
    val sourceCurrency =
      Option(inputArguments(FROM_INDEX).asInstanceOf[UTF8String]).map(_.toString)
    val targetCurrency = Option(inputArguments(TO_INDEX).asInstanceOf[UTF8String]).map(_.toString)
    val date = Option(inputArguments(DATE_INDEX).asInstanceOf[UTF8String]).map(_.toString)

    // perform conversion
    val conversion =
      conversionFunction(client, conversionType, sourceCurrency, targetCurrency, date)
    val resultTry = conversion(amount)

    // If 'resultTry' holds a 'Failure', we have to propagate it because potential failure
    // handling already took place. We just wrap it in case it is a cryptic error.
    resultTry.recover {
      case NonFatal(err) => throw new CurrencyConversionException(errorMessage, err)
    }.get.map(Decimal.apply).orNull
  }

  override def dataType: DataType = DecimalType.forType(DoubleType)

  override def nullable: Boolean = true

  override def inputTypes: Seq[AbstractDataType] =
    Seq(StringType, StringType, DecimalType, StringType, StringType, StringType)

  def inputNames: Seq[String] =
    Seq("client", "conversion_type", "amount", "source", "target", "date")

  def getChild(name: String): Option[Expression] = {
    inputNames.zip(children).find { case (n, _) => name == n }.map(_._2)
  }
} 
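
The `resultTry.recover { case NonFatal(err) => throw ... }.get` idiom above rethrows low-level failures wrapped in a domain exception while preserving the original cause. A stripped-down sketch of the same idiom (all names are illustrative):

import scala.util.Try
import scala.util.control.NonFatal

object Conversions {
  class ConversionException(msg: String, cause: Throwable)
    extends RuntimeException(msg, cause)

  // Rethrow any non-fatal failure wrapped in a domain exception; `.get`
  // surfaces either the value or the wrapped error, as in `eval` above.
  def unwrap[A](result: Try[A]): A =
    result.recover {
      case NonFatal(err) => throw new ConversionException("conversion library failed", err)
    }.get
}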
Example 91
Source File: DefaultFileWatchService.scala    From play-file-watch   with Apache License 2.0 5 votes vote down vote up
package play.dev.filewatch

import java.io.File
import java.nio.file.FileSystems

import io.methvin.watcher.DirectoryChangeEvent
import io.methvin.watcher.DirectoryChangeListener
import io.methvin.watcher.DirectoryWatcher
import io.methvin.watchservice.MacOSXListeningWatchService

import scala.collection.JavaConverters._
import scala.util.control.NonFatal


class DefaultFileWatchService(logger: LoggerProxy, isMac: Boolean) extends FileWatchService {

  def this(logger: LoggerProxy) = this(logger, false)

  def watch(filesToWatch: Seq[File], onChange: () => Unit) = {
    val dirsToWatch = filesToWatch.filter { file =>
      if (file.isDirectory) {
        true
      } else if (file.isFile) {
        logger.warn("An attempt has been made to watch the file: " + file.getCanonicalPath)
        logger.warn("DefaultFileWatchService only supports watching directories. The file will not be watched.")
        false
      } else false
    }

    val watchService = if (isMac) new MacOSXListeningWatchService() else FileSystems.getDefault.newWatchService()
    val directoryWatcher =
      DirectoryWatcher
        .builder()
        .paths(dirsToWatch.map(_.toPath).asJava)
        .listener(new DirectoryChangeListener {
          override def onEvent(event: DirectoryChangeEvent): Unit = onChange()
        })
        .watchService(watchService)
        .build()

    val thread = new Thread(
      new Runnable {
        override def run(): Unit = {
          try {
            directoryWatcher.watch()
          } catch {
            case NonFatal(_) => // Do nothing, this means the watch service has been closed, or we've been interrupted.
          }
        }
      },
      "play-watch-service"
    )
    thread.setDaemon(true)
    thread.start()

    new FileWatcher {
      override def stop(): Unit = directoryWatcher.close()
    }
  }
} 
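
The watch thread above follows a common shape: a blocking loop on a daemon thread in which a non-fatal exception is treated as a shutdown signal rather than an error. A generic sketch (`daemonLoop` is a hypothetical helper, not part of play-file-watch):

import scala.util.control.NonFatal

object Daemons {
  // Run a blocking loop on a daemon thread; a non-fatal exception is treated
  // as a shutdown signal (e.g. the underlying watch service was closed).
  def daemonLoop(name: String)(body: => Unit): Thread = {
    val thread = new Thread(new Runnable {
      override def run(): Unit =
        try body
        catch { case NonFatal(_) => () }
    }, name)
    thread.setDaemon(true) // do not keep the JVM alive for this thread
    thread.start()
    thread
  }
}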
Example 92
Source File: S3SnapshotStore.scala    From akka-persistence-s3   with MIT License 5 votes vote down vote up
package akka.persistence.s3
package snapshot

import java.io.ByteArrayInputStream
import akka.actor.ActorLogging
import akka.persistence.serialization.Snapshot
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.persistence.snapshot.SnapshotStore
import akka.serialization.SerializationExtension
import com.amazonaws.services.s3.model.{ ObjectMetadata, S3ObjectInputStream, ListObjectsRequest }
import com.typesafe.config.Config
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.concurrent.Future
import scala.util.control.NonFatal

case class SerializationResult(stream: ByteArrayInputStream, size: Int)

class S3SnapshotStore(config: Config) extends SnapshotStore with ActorLogging with SnapshotKeySupport {
  import context.dispatcher

  val settings = new S3SnapshotConfig(config)

  val s3Client: S3Client = new S3Client {
    val s3ClientConfig = new S3ClientConfig(context.system.settings.config.getConfig("s3-client"))
  }

  private val serializationExtension = SerializationExtension(context.system)

  private val s3Dispatcher = context.system.dispatchers.lookup("s3-snapshot-store.s3-client-dispatcher")

  val extensionName = settings.extension

  override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
    snapshotMetadatas(persistenceId, criteria)
      .map(_.sorted.takeRight(settings.maxLoadAttempts))
      .flatMap(load)
  }

  private def load(metadata: immutable.Seq[SnapshotMetadata]): Future[Option[SelectedSnapshot]] = metadata.lastOption match {
    case None => Future.successful(None)
    case Some(md) =>
      s3Client.getObject(settings.bucketName, snapshotKey(md))(s3Dispatcher)
        .map { obj =>
          val snapshot = deserialize(obj.getObjectContent)
          Some(SelectedSnapshot(md, snapshot.data))
        } recoverWith {
          case NonFatal(e) =>
            log.error(e, s"Error loading snapshot [${md}]")
            load(metadata.init) // try older snapshot
        }
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {
    val serialized = serialize(Snapshot(snapshot))
    val objectMetadata = new ObjectMetadata()
    objectMetadata.setContentLength(serialized.size)
    s3Client.putObject(
      settings.bucketName,
      snapshotKey(metadata),
      serialized.stream,
      objectMetadata
    )(s3Dispatcher).map(_ => ())
  }

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
    if (metadata.timestamp == 0L)
      deleteAsync(metadata.persistenceId, SnapshotSelectionCriteria(metadata.sequenceNr, Long.MaxValue, metadata.sequenceNr, Long.MinValue))
    else
      s3Client.deleteObject(settings.bucketName, snapshotKey(metadata))(s3Dispatcher)
  }

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    val metadatas = snapshotMetadatas(persistenceId, criteria)
    metadatas.map(list => Future.sequence(list.map(deleteAsync)))
  }

  private def snapshotMetadatas(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[List[SnapshotMetadata]] = {
    s3Client.listObjects(
      new ListObjectsRequest()
        .withBucketName(settings.bucketName)
        .withPrefix(prefixFromPersistenceId(persistenceId))
        .withDelimiter("/")
    )(s3Dispatcher)
      .map(_.getObjectSummaries.toList.map(s => parseKeyToMetadata(s.getKey))
        .filter(m => m.sequenceNr >= criteria.minSequenceNr && m.sequenceNr <= criteria.maxSequenceNr && m.timestamp >= criteria.minTimestamp && m.timestamp <= criteria.maxTimestamp))

  }

  protected def deserialize(inputStream: S3ObjectInputStream): Snapshot =
    serializationExtension.deserialize(akka.persistence.serialization.streamToBytes(inputStream), classOf[Snapshot]).get

  protected def serialize(snapshot: Snapshot): SerializationResult = {
    val serialized = serializationExtension.findSerializerFor(snapshot).toBinary(snapshot)
    SerializationResult(new ByteArrayInputStream(serializationExtension.findSerializerFor(snapshot).toBinary(snapshot)), serialized.size)
  }
} 
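
`load` above degrades gracefully: if the newest snapshot fails for any non-fatal reason, it retries recursively with the older ones. The same shape, independent of S3 and Akka (names are illustrative):

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object FallbackLoad {
  // Try the newest key first; on any non-fatal failure, recurse on the
  // remaining (older) keys, ending with None when nothing is left.
  def loadNewest[A](keys: List[String])(fetch: String => Future[A])(
      implicit ec: ExecutionContext): Future[Option[A]] =
    keys.lastOption match {
      case None => Future.successful(None)
      case Some(key) =>
        fetch(key).map(Some(_)).recoverWith {
          case NonFatal(_) => loadNewest(keys.init)(fetch)
        }
    }
}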
Example 93
Source File: Loop.scala    From Hive-JDBC-Proxy   with Apache License 2.0 5 votes vote down vote up
package com.enjoyyin.hive.proxy.jdbc.util

import java.util.concurrent.atomic.AtomicBoolean
import scala.util.control.NonFatal
import com.enjoyyin.hive.proxy.jdbc.exception.IngoreEventException



abstract class DaemonThread(override val threadName: String) extends LoopThread

trait LoopThread extends Logging {
  
  val threadName: String
  
  private val stopped = new AtomicBoolean(false)
  
  private var startTime = 0L
  
  private val eventThread: Thread = new Thread(threadName) {
    setDaemon(true)
    override def run(): Unit = {
      startTime = System.currentTimeMillis
      while (!stopped.get) {
        Utils.tryCatch(doLoop){
          case _: InterruptedException => return // exit even if eventQueue is not empty
          case NonFatal(e) => logError("Unexpected error in " + threadName, e)
        }
      }
    }
  }
  
  protected def doLoop: Unit
  
  def start: Unit = {
    if (stopped.get) {
      throw new IllegalStateException(threadName + " has already been stopped!")
    }
    // Call onStart before starting the event thread to make sure it happens before onReceive
    onStart
    eventThread.start()
  }
  
  def getStartTime: Long = startTime
  
  def stop: Unit = {
    if (stopped.compareAndSet(false, true)) {
      eventThread.interrupt()
      var onStopCalled = false
      Utils.tryCatch {
        eventThread.join()
        onStopCalled = true
        onStop
      } {
        case _: InterruptedException =>
          Thread.currentThread.interrupt()
          if (!onStopCalled) {
            onStop
          }
        case t => throw t
      }
    } else {
      // Keep quiet to allow calling `stop` multiple times.
    }
  }
  
  def isActive: Boolean = eventThread.isAlive
  
  protected def onStart: Unit = {}

  protected def onStop: Unit = {}

}

trait LoopCollection[E] extends Logging {
  
  def post(event: E): Unit
  
  val threadName: String
  
  protected def onReceiveSafety(event: E): Unit = Utils.tryCatch(onReceive(event)){
    case _: IngoreEventException => //ignore it
    case NonFatal(e) => Utils.tryCatch(onError(e)){
      case NonFatal(_) => logError("Unexpected error in " + threadName, e)
    }
  }
  
  protected def onReceive(event: E): Unit
  
  def remove(obj: E): Unit
  
  def remove(filter: E => Boolean): Unit
  
  protected def onError(e: Throwable): Unit
} 
Example 94
Source File: JavaScriptProjectEditor.scala    From rug   with GNU General Public License v3.0 5 votes vote down vote up
package com.atomist.rug.runtime.js

import com.atomist.param.ParameterValues
import com.atomist.project.archive.{DefaultAtomistConfig, RugResolver}
import com.atomist.project.edit._
import com.atomist.rug.kind.core.ProjectMutableView
import com.atomist.source.ArtifactSource
import com.atomist.util.Timing._
import jdk.nashorn.api.scripting.ScriptObjectMirror

import scala.util.control.NonFatal


class JavaScriptProjectEditor(jsc: JavaScriptContext,
                              jsVar: ScriptObjectMirror,
                              rugAs: ArtifactSource,
                              resolver: Option[RugResolver])
  extends JavaScriptProjectOperation(jsc, jsVar, rugAs)
    with ProjectEditorSupport {

  override def applicability(as: ArtifactSource): Applicability = Applicability.OK

  override protected def modifyInternal(targetProject: ArtifactSource, poa: ParameterValues): ModificationAttempt = {
    val validated = addDefaultParameterValues(poa)
    validateParameters(validated)

    val (result, elapsedTime) = time {
      val pmv = new ProjectMutableView(rugAs, targetProject, atomistConfig = DefaultAtomistConfig, Some(this), rugResolver = resolver)
      try {
        // Important that we don't invoke edit on the prototype as otherwise all constructor effects are lost!
        invokeMemberFunction(jsc, jsVar, "edit", Some(validated), wrapProject(pmv))
        if (pmv.currentBackingObject == targetProject) {
          NoModificationNeeded("OK")
        } else {
          SuccessfulModification(pmv.currentBackingObject, pmv.changeLogEntries)
        }
      } catch {
        case sle: SourceLanguageRuntimeException =>
          throw sle
        case NonFatal(t) =>
          throw new RuntimeException(s"Editor '$name' failed due to ${t.getMessage}", t)
      }
    }
    logger.debug(s"$name modifyInternal took ${elapsedTime}ms")
    result
  }
} 
Example 95
Source File: DurationFormat.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.ops.v4

import play.api.libs.json._

import scala.concurrent.duration._
import scala.concurrent.duration.ops.v4.DurationOps
import scala.util.control.NonFatal
import scala.util.{Failure, Success}


object DurationFormat {

  def asString(duration: Duration): String = duration match {
    case inf: Duration.Infinite => asString(inf)
    case finite => finite.toString
  }

  def asString(inf: Duration.Infinite): String = inf.toString.substring("Duration.".length)

  object string extends StringDurationFormat {
    implicit val durationFormat: Format[Duration] = Format(durationReads, durationWrites)
    implicit val finiteDurationFormat: Format[FiniteDuration] = Format(finiteDurationReads, durationWrites)
  }

  object array extends ArrayDurationFormat {
    implicit val durationFormat: Format[Duration] = Format(durationReads, durationWrites)
    implicit val finiteDurationFormat: Format[FiniteDuration] = Format(finiteDurationReads, durationWrites)
  }
} 
Example 96
Source File: RecoverOps.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.ops.v4

import play.api.data.validation.ValidationError
import play.api.libs.json._

import scala.language.higherKinds
import scala.reflect.ClassTag
import scala.util.control.NonFatal

trait RecoverOps[F[x] <: Reads[x], A] extends Any {

  def unsafeReader: Reads[A]

  
  def expectedTypeError(cls: Class[_], args: Any*): JsError = {
    expectedTypeError(safeSimpleClassName(cls), args: _*)
  }
}

class ReadsRecoverOps[A](override val unsafeReader: Reads[A]) extends AnyVal with RecoverOps[Reads, A] {
  final override protected def build(safeReader: Reads[A]): Reads[A] = safeReader
}

class FormatRecoverOps[A](val unsafeFormat: Format[A]) extends AnyVal with RecoverOps[Format, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): Format[A] = Format(safeReader, unsafeFormat)
}

class OFormatRecoverOps[A](val unsafeFormat: OFormat[A]) extends AnyVal with RecoverOps[OFormat, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): OFormat[A] = OFormat(safeReader, unsafeFormat)
} 
Example 97
Source File: RecoverOps.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.ops.v4

import play.api.libs.json._

import scala.language.higherKinds
import scala.reflect.ClassTag
import scala.util.control.NonFatal

trait RecoverOps[F[x] <: Reads[x], A] extends Any {

  def unsafeReader: Reads[A]

  
  def recoverWith(
    recoverFn: PartialFunction[Throwable, JsResult[A]]
  ): Reads[A] = build {
    Reads { json: JsValue =>
      try {
        unsafeReader.reads(json)
      } catch {
        case NonFatal(ex) if recoverFn isDefinedAt ex => recoverFn(ex)
      }
    }
  }

  // Subclasses need to define how to build an instance of F[A] from a Reads[A]
  protected def build(safeReader: Reads[A]): F[A]
}

class ReadsRecoverOps[A](override val unsafeReader: Reads[A]) extends AnyVal with RecoverOps[Reads, A] {
  final override protected def build(safeReader: Reads[A]): Reads[A] = safeReader
}

class FormatRecoverOps[A](val unsafeFormat: Format[A]) extends AnyVal with RecoverOps[Format, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): Format[A] = Format(safeReader, unsafeFormat)
}

class OFormatRecoverOps[A](val unsafeFormat: OFormat[A]) extends AnyVal with RecoverOps[OFormat, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): OFormat[A] = OFormat(safeReader, unsafeFormat)
} 
Example 98
Source File: RecoverOps.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.ops.v4

import play.api.libs.json._

import scala.language.higherKinds
import scala.reflect.ClassTag
import scala.util.control.NonFatal

trait RecoverOps[F[x] <: Reads[x], A] extends Any {

  def unsafeReader: Reads[A]

  
  def expectedTypeError(cls: Class[_], args: Any*): JsError = {
    expectedTypeError(safeSimpleClassName(cls), args: _*)
  }
}

class ReadsRecoverOps[A](override val unsafeReader: Reads[A]) extends AnyVal with RecoverOps[Reads, A] {
  final override protected def build(safeReader: Reads[A]): Reads[A] = safeReader
}

class FormatRecoverOps[A](val unsafeFormat: Format[A]) extends AnyVal with RecoverOps[Format, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): Format[A] = Format(safeReader, unsafeFormat)
}

class OFormatRecoverOps[A](val unsafeFormat: OFormat[A]) extends AnyVal with RecoverOps[OFormat, A] {
  final override def unsafeReader: Reads[A] = unsafeFormat
  final override protected def build(safeReader: Reads[A]): OFormat[A] = OFormat(safeReader, unsafeFormat)
} 
Example 99
Source File: DurationFormat.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.ops.v4

import play.api.libs.json._
import play.api.libs.json.ops.v4.ImplicitTupleFormats._

import scala.concurrent.duration._
import scala.concurrent.duration.ops.v4.DurationOps
import scala.util.control.NonFatal
import scala.util.{Failure, Success}


object DurationFormat {

  def asString(duration: Duration): String = duration match {
    case inf: Duration.Infinite => asString(inf)
    case finite => finite.toString
  }

  def asString(inf: Duration.Infinite): String = inf.toString.substring("Duration.".length)

  object string extends StringDurationFormat {
    implicit val durationFormat: Format[Duration] = Format(durationReads, durationWrites)
    implicit val finiteDurationFormat: Format[FiniteDuration] = Format(finiteDurationReads, durationWrites)
  }

  object array extends ArrayDurationFormat {
    implicit val durationFormat: Format[Duration] = Format(durationReads, durationWrites)
    implicit val finiteDurationFormat: Format[FiniteDuration] = Format(finiteDurationReads, durationWrites)
  }
} 
Example 100
Source File: ConsulHttpProtocol.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.dao

import java.util.UUID

import spray.json._

import scala.util.control.NonFatal
import java.util.Base64

trait ConsulHttpProtocol extends DefaultJsonProtocol {

  implicit val uuidFormat = new JsonFormat[UUID] {
    override def read(json: JsValue): UUID = json match {
      case JsString(uuid) ⇒ try {
        UUID.fromString(uuid)
      } catch {
        case NonFatal(e) ⇒ deserializationError("Expected UUID, but got " + uuid)
      }
      case x ⇒ deserializationError("Expected UUID as JsString, but got " + x)
    }

    override def write(obj: UUID): JsValue = JsString(obj.toString)
  }

  implicit val binaryDataFormat = new JsonFormat[BinaryData] {
    override def read(json: JsValue): BinaryData = json match {
      case JsString(data) ⇒ try {
        BinaryData(Base64.getMimeDecoder.decode(data))
      } catch {
        case NonFatal(e) ⇒ deserializationError("Expected base64 encoded binary data, but got " + data)
      }
      case x ⇒ deserializationError("Expected base64 encoded binary data as JsString, but got " + x)
    }

    override def write(obj: BinaryData): JsValue = JsString(Base64.getMimeEncoder.encodeToString(obj.data))
  }

  implicit val serviceFormat = jsonFormat(
    (node: String, address: String, serviceId: String, serviceName: String, serviceTags: Option[Set[String]], serviceAddress: String, servicePort: Int) ⇒
      ServiceInstance(node, address, serviceId, serviceName, serviceTags.getOrElse(Set.empty), serviceAddress, servicePort),
    "Node", "Address", "ServiceID", "ServiceName", "ServiceTags", "ServiceAddress", "ServicePort"
  )
  implicit val httpCheckFormat = jsonFormat(HttpHealthCheck, "HTTP", "Interval")
  implicit val scriptCheckFormat = jsonFormat(ScriptHealthCheck, "Script", "Interval")
  implicit val ttlCheckFormat = jsonFormat(TTLHealthCheck, "TTL")
  implicit val checkWriter = lift {
    new JsonWriter[HealthCheck] {
      override def write(obj: HealthCheck): JsValue = obj match {
        case obj: ScriptHealthCheck ⇒ obj.toJson
        case obj: HttpHealthCheck   ⇒ obj.toJson
        case obj: TTLHealthCheck    ⇒ obj.toJson
      }
    }
  }
  implicit val serviceRegistrationFormat = jsonFormat(ServiceRegistration, "Name", "ID", "Tags", "Address", "Port", "Check")
  implicit val sessionCreationFormat = jsonFormat(SessionCreation, "LockDelay", "Name", "Node", "Checks", "Behavior", "TTL")
  implicit val keyDataFormat = jsonFormat(KeyData, "Key", "CreateIndex", "ModifyIndex", "LockIndex", "Flags", "Value", "Session")
  implicit val sessionInfoFormat = jsonFormat(SessionInfo, "LockDelay", "Checks", "Node", "ID", "CreateIndex", "Name", "Behavior", "TTL")
} 
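
Both custom formats above share one shape: pattern-match on the JSON node and translate any non-fatal parse failure into a `deserializationError`. Extracted as a self-contained sketch (the `UuidJsonProtocol` object is hypothetical):

import java.util.UUID

import spray.json._

import scala.util.control.NonFatal

object UuidJsonProtocol extends DefaultJsonProtocol {
  implicit val uuidFormat: JsonFormat[UUID] = new JsonFormat[UUID] {
    override def read(json: JsValue): UUID = json match {
      case JsString(s) =>
        try UUID.fromString(s)
        catch { case NonFatal(_) => deserializationError("Expected UUID, but got " + s) }
      case x => deserializationError("Expected UUID as JsString, but got " + x)
    }
    override def write(obj: UUID): JsValue = JsString(obj.toString)
  }
}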
Example 101
Source File: ConstFuture.scala    From arrows   with Apache License 2.0 5 votes vote down vote up
package arrows.twitter

import com.twitter.util.Future
import com.twitter.util.Try
import com.twitter.util.Awaitable
import com.twitter.util.Duration
import com.twitter.util.Return
import scala.runtime.NonLocalReturnControl
import com.twitter.concurrent.Scheduler
import com.twitter.util.FutureNonLocalReturnControl
import com.twitter.util.Local
import com.twitter.util.Monitor
import scala.util.control.NonFatal
import com.twitter.util.Promise
import com.twitter.util.Throw

trait ConstFuture[T] extends Future[T] {
  final def isReady(implicit permit: Awaitable.CanAwait): Boolean = true

  override final def ready(timeout: Duration)(implicit permit: Awaitable.CanAwait) = this
  final def poll: Option[com.twitter.util.Try[T]] = Some(toTry)

  protected def toTry: Try[T]

  final def respond(k: Try[T] => Unit): Future[T] = {
    val saved = Local.save()
    Scheduler.submit(new Runnable {
      def run(): Unit = {
        val current = Local.save()
        Local.restore(saved)
        try k(toTry)
        catch Monitor.catcher
        finally Local.restore(current)
      }
    })
    this
  }

  final def raise(interrupt: Throwable): Unit = ()

  final def transform[B](f: Try[T] => Future[B]): Future[B] = {
    val p = new Promise[B]
    // see the note on `respond` for an explanation of why `Scheduler` is used.
    val saved = Local.save()
    Scheduler.submit(new Runnable {
      def run(): Unit = {
        val current = Local.save()
        Local.restore(saved)
        val computed = try f(toTry)
        catch {
          case e: NonLocalReturnControl[_] => Future.exception(new FutureNonLocalReturnControl(e))
          case NonFatal(e)                 => Future.exception(e)
          case t: Throwable =>
            Monitor.handle(t)
            throw t
        } finally Local.restore(current)
        p.become(computed)
      }
    })
    p
  }
}

class ReturnFuture[T](r: T) extends ConstFuture[T] {
  override final def result(timeout: Duration)(implicit permit: Awaitable.CanAwait): T = r
  override final def toTry = Return(r)
}

class ThrowFuture[T](ex: Throwable) extends ConstFuture[T] {
  override final def result(timeout: Duration)(implicit permit: Awaitable.CanAwait): T = throw ex
  override final def toTry = Throw(ex)
} 
Example 102
Source File: package.scala    From tscfg   with Apache License 2.0 5 votes vote down vote up
package tscfg

import java.util.regex.Pattern

import scala.util.control.NonFatal

package object codeDefs {
  private val beginTemplatePattern = Pattern.compile("\\s*//<([^>]+)>.*$")

  private val javaMap = getMap("codeDefs/JavaDefs.java")
  private val scalaMap = getMap("codeDefs/ScalaDefs.scala")

  def javaDef(key: String): String = getDef("java", javaMap, key)

  def scalaDef(key: String): String = getDef("scala", scalaMap, key)

  private def getDef(lang: String, map: Map[String, String], key: String): String = {
    try map(key)
    catch {
      // $COVERAGE-OFF$
      case NonFatal(e) =>
        val keys = map.keySet.toList.sorted
        val msg = s"Unexpected: undefined key '$key' for $lang. Defined keys: $keys. Please report this bug"
        throw new RuntimeException(msg, e)
      // $COVERAGE-ON$
    }
  }

  private def getMap(resourceName: String): Map[String, String] = try {
    //println(s"codeDefs.getMap $resourceName")
    val map = collection.mutable.HashMap[String, String]()
    val is = getClass.getClassLoader.getResourceAsStream(resourceName)
    assert(is != null)
    val source = io.Source.fromInputStream(is, "utf-8")
    var key: String = null
    val template = new StringBuilder
    for (line <- source.getLines()) {
      if (key == null) {
        val m = beginTemplatePattern.matcher(line)
        if (m.find) {
          key = m.group(1)
        }
      }
      else if (line.contains("//</" + key + ">")) {
        map.update(key, template.toString)
        key = null
        template.setLength(0)
      }
      else template.append(line).append("\n")
    }
    is.close()
    map.toMap
  }
  catch {
    case NonFatal(ex) =>
      throw new RuntimeException(
        s"Unexpected exception in getMap(resourceName=$resourceName)." +
        " Please report this bug.", ex)
  }
} 
Example 103
Source File: TaskInstances.scala    From shims   with Apache License 2.0 5 votes vote down vote up
package shims.effect.instances

import cats.{Applicative, Monad, Parallel, StackSafeMonad, ~>}
import cats.effect.{Effect, ExitCase, IO, SyncIO}

import scalaz.{Tag, -\/, \/, \/-}
import scalaz.concurrent.Task.ParallelTask
import scalaz.concurrent.{Future, Task}

import shims.conversions.MonadErrorConversions

import java.util.concurrent.atomic.AtomicBoolean

import scala.util.control.NonFatal

trait TaskInstances extends MonadErrorConversions {

  // cribbed from quasar, where it was mostly cribbed from scalaz-task-effect
  implicit object taskEffect extends Effect[Task] with StackSafeMonad[Task] {

    def pure[A](x: A): Task[A] = Task.now(x)

    def handleErrorWith[A](fa: Task[A])(f: Throwable => Task[A]): Task[A] =
      fa.handleWith(functionToPartial(f))

    def raiseError[A](e: Throwable): Task[A] = Task.fail(e)

    // In order to comply with `repeatedCallbackIgnored` law
    // on async, a custom AtomicBoolean is required to ignore
    // second callbacks.
    def async[A](k: (Either[Throwable, A] => Unit) => Unit): Task[A] = Task.async { registered =>
      val a = new AtomicBoolean(true)
      try k(e => if (a.getAndSet(false)) registered(\/.fromEither(e)) else ())
      catch { case NonFatal(t) => registered(-\/(t)) }
    }

    def asyncF[A](k: (Either[Throwable, A] => Unit) => Task[Unit]): Task[A] =
      async(k.andThen(_.unsafePerformAsync(forget)))

    // emulates using attempt
    def bracketCase[A, B](acquire: Task[A])(use: A => Task[B])(release: (A, ExitCase[Throwable]) => Task[Unit]): Task[B] = {
      for {
        a <- acquire
        bOr <- use(a).attempt
        ec = bOr.fold(ExitCase.Error(_), _ => ExitCase.Completed)
        _ <- release(a, ec)
        b <- bOr.fold(Task.fail, Task.now)
      } yield b
    }

    
    def runAsync[A](fa: Task[A])(cb: Either[Throwable, A] => IO[Unit]): SyncIO[Unit] =
      SyncIO {
        fa unsafePerformAsync { disjunction =>
          cb(disjunction.toEither).unsafeRunAsync(forget)
        }
      }

    def runSyncStep[A](fa: Task[A]): IO[Either[Task[A], A]] =
      IO {
        fa.get match {
          case Future.Now(-\/(_)) => Left(fa)

          case other => other.step match {
            case Future.Now(\/-(a)) => Right(a)
            case other => Left(new Task(other))
          }
        }
      }

    override def map[A, B](fa: Task[A])(f: A => B): Task[B] =
      fa.map(f)

    def flatMap[A, B](fa: Task[A])(f: A => Task[B]): Task[B] = fa.flatMap(f)

    override def delay[A](thunk: => A): Task[A] = Task.delay(thunk)

    def suspend[A](thunk: => Task[A]): Task[A] = Task.suspend(thunk)
  }

  implicit val taskParallel: Parallel.Aux[Task, ParallelTask] = new Parallel[Task] {
    import Task.taskParallelApplicativeInstance

    type F[A] = ParallelTask[A]

    val monad: Monad[Task] = taskEffect
    val applicative: Applicative[ParallelTask] = Applicative[ParallelTask]
    val sequential: ParallelTask ~> Task = λ[ParallelTask ~> Task](Tag.unwrap(_))
    val parallel: Task ~> ParallelTask = λ[Task ~> ParallelTask](Tag(_))
  }

  private def functionToPartial[A, B](f: A => B): PartialFunction[A, B] = {
    case a => f(a)
  }

  private def forget[A](x: A): Unit = ()
} 
Example 104
Source File: MTLSpecs.scala    From shims   with Apache License 2.0 5 votes vote down vote up
package shims.effect

import cats.effect.{ContextShift, IO}
import cats.effect.laws.discipline.{arbitrary, AsyncTests, ConcurrentEffectTests, ConcurrentTests}, arbitrary._
import cats.effect.laws.util.{TestContext, TestInstances}, TestInstances._

import cats.{Eq, Functor, Monad}
import cats.instances.either._
import cats.instances.int._
import cats.instances.option._
import cats.instances.tuple._
import cats.instances.unit._
import cats.syntax.functor._

import scalaz.{EitherT, Kleisli, OptionT, StateT, WriterT}

import org.scalacheck.{Arbitrary, Prop}

import org.specs2.Specification
import org.specs2.scalacheck.Parameters
import org.specs2.specification.core.Fragments

import org.typelevel.discipline.Laws
import org.typelevel.discipline.specs2.Discipline

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

import java.io.{ByteArrayOutputStream, PrintStream}

object MTLSpecs extends Specification with Discipline {

  def is =
    br ^ checkAllAsync("OptionT[IO, ?]", implicit ctx => ConcurrentTests[OptionT[IO, ?]].concurrent[Int, Int, Int]) ^
    br ^ checkAllAsync("Kleisli[IO, Int, ?]", implicit ctx => ConcurrentTests[Kleisli[IO, Int, ?]].concurrent[Int, Int, Int]) ^
    br ^ checkAllAsync("EitherT[IO, Throwable, ?]", implicit ctx => ConcurrentEffectTests[EitherT[IO, Throwable, ?]].concurrentEffect[Int, Int, Int]) ^
    br ^ checkAllAsync("StateT[IO, Int, ?]", implicit ctx => AsyncTests[StateT[IO, Int, ?]].async[Int, Int, Int]) ^
    br ^ checkAllAsync("WriterT[IO, Int, ?]", implicit ctx => ConcurrentEffectTests[WriterT[IO, Int, ?]].concurrentEffect[Int, Int, Int])

  def checkAllAsync(name: String, f: TestContext => Laws#RuleSet)(implicit p: Parameters) = {
    val context = TestContext()
    val ruleSet = f(context)

    Fragments.foreach(ruleSet.all.properties.toList) {
      case (id, prop) =>
        s"$name.$id" ! check(Prop(p => silenceSystemErr(prop(p))), p, defaultFreqMapPretty) ^ br
    }
  }

  implicit def iocsForEC(implicit ec: ExecutionContext): ContextShift[IO] =
    IO.contextShift(ec)

  implicit def optionTArbitrary[F[_], A](implicit arbFA: Arbitrary[F[Option[A]]]): Arbitrary[OptionT[F, A]] =
    Arbitrary(arbFA.arbitrary.map(OptionT.optionT(_)))

  implicit def kleisliArbitrary[F[_], R, A](implicit arbRFA: Arbitrary[R => F[A]]): Arbitrary[Kleisli[F, R, A]] =
    Arbitrary(arbRFA.arbitrary.map(Kleisli(_)))

  implicit def eitherTArbitrary[F[_]: Functor, L, A](implicit arbEA: Arbitrary[F[Either[L, A]]]): Arbitrary[EitherT[F, L, A]] =
    Arbitrary(arbEA.arbitrary.map(fe => EitherT.eitherT(fe.map(_.asScalaz))))

  implicit def stateTArbitrary[F[_]: Monad, S, A](implicit arbSFA: Arbitrary[S => F[(S, A)]]): Arbitrary[StateT[F, S, A]] =
    Arbitrary(arbSFA.arbitrary.map(StateT(_)))

  implicit def writerTArbitrary[F[_], L, A](implicit arbFLA: Arbitrary[F[(L, A)]]): Arbitrary[WriterT[F, L, A]] =
    Arbitrary(arbFLA.arbitrary.map(WriterT(_)))

  implicit def kleisliEq[F[_], A](implicit eqv: Eq[F[A]]): Eq[Kleisli[F, Int, A]] =
    Eq.by(_(42))   // totally random and comprehensive seed

  implicit def stateTEq[F[_]: Monad, S, A](implicit eqv: Eq[F[(Int, A)]]): Eq[StateT[F, Int, A]] =
    Eq.by(_.run(42))   // totally random and comprehensive seed

  // copied from cats-effect
  private def silenceSystemErr[A](thunk: => A): A = synchronized {
    // Silencing System.err
    val oldErr = System.err
    val outStream = new ByteArrayOutputStream()
    val fakeErr = new PrintStream(outStream)
    System.setErr(fakeErr)
    try {
      val result = thunk
      System.setErr(oldErr)
      result
    } catch {
      case NonFatal(e) =>
        System.setErr(oldErr)
        // In case of errors, print whatever was caught
        fakeErr.close()
        val out = outStream.toString("utf-8")
        if (out.nonEmpty) oldErr.println(out)
        throw e
    }
  }
} 
Example 105
Source File: TicTacToeApp.scala    From fx-tictactoe   with Apache License 2.0 5 votes vote down vote up
package net.ladstatt.tictactoe

import java.net.URL
import java.util.ResourceBundle
import javafx.application.Application
import javafx.beans.property.SimpleObjectProperty
import javafx.beans.value.{ChangeListener, ObservableValue}
import javafx.fxml.{FXMLLoader, FXML, Initializable}
import javafx.scene.{Scene, Parent}
import javafx.scene.control.{Label, Button}
import javafx.stage.Stage

import scala.util.control.NonFatal


class TicTacToeApp extends javafx.application.Application {

  private val resource: URL = getClass.getResource("TicTacToeApp.fxml")
  assert(resource != null)
  val loader = new FXMLLoader(resource)

  override def start(stage: Stage): Unit =
    try {
      stage.setTitle("TicTacToe App")
      loader.load[Parent]()
      stage.setScene(new Scene(loader.getRoot[Parent]))
      stage.show()
    } catch {
      case NonFatal(e) => e.printStackTrace()
    }

} 
Example 106
Source File: ConcurrentMapBackedCache.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.ConcurrentMap

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

private class ConcurrentMapBackedCache[V](store: ConcurrentMap[Any, Future[V]]) {
  val cache = this

  def apply(key: Any) = new Keyed(key)

  class Keyed(key: Any) {
    def apply(magnet: => ValueMagnet[V])(implicit ec: ExecutionContext): Future[V] =
      cache.apply(
        key,
        () =>
          try magnet.future
          catch { case NonFatal(e) => Future.failed(e) })
  }

  def apply(key: Any, genValue: () => Future[V])(implicit ec: ExecutionContext): Future[V] = {
    store.computeIfAbsent(
      key,
      new java.util.function.Function[Any, Future[V]]() {
        override def apply(key: Any): Future[V] = {
          val future = genValue()
          future.onComplete { value =>
            // in case of exceptions we remove the cache entry (i.e. try again later)
            if (value.isFailure) store.remove(key, future)
          }
          future
        }
      })
  }

  def remove(key: Any) = Option(store.remove(key))

  def size = store.size
}

class ValueMagnet[V](val future: Future[V])
object ValueMagnet {
  import scala.language.implicitConversions

  implicit def fromAny[V](block: V): ValueMagnet[V] = fromFuture(Future.successful(block))
  implicit def fromFuture[V](future: Future[V]): ValueMagnet[V] = new ValueMagnet(future)
} 
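
A hypothetical usage of the cache above (the demo object, URL, and computation are illustrative; the class is file-private, so real callers would sit in the same file):

import java.util.concurrent.ConcurrentHashMap

import scala.concurrent.{ExecutionContext, Future}

object CacheDemo extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global

  val cache = new ConcurrentMapBackedCache[String](
    new ConcurrentHashMap[Any, Future[String]]())

  // Concurrent callers of the same key share a single in-flight Future;
  // failed Futures are evicted so a later call can retry.
  val body: Future[String] = cache("https://example.com") {
    Future("fetched body") // stands in for an expensive computation
  }
}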
Example 107
Source File: DockerVersion.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.standalone

import scala.util.control.NonFatal

case class DockerVersion(major: Int, minor: Int, patch: Int) extends Ordered[DockerVersion] {
  import scala.math.Ordered.orderingToOrdered
  def compare(that: DockerVersion): Int =
    (this.major, this.minor, this.patch) compare (that.major, that.minor, that.patch)

  override def toString = s"$major.$minor.$patch"
}

object DockerVersion {
  implicit val ord: Ordering[DockerVersion] = Ordering.by(unapply)
  private val pattern = ".*Docker version ([\\d.]+).*".r

  def apply(str: String): DockerVersion = {
    try {
      val parts = if (str != null && str.nonEmpty) str.split('.') else Array[String]()
      val major = if (parts.length >= 1) parts(0).toInt else 0
      val minor = if (parts.length >= 2) parts(1).toInt else 0
      val patch = if (parts.length >= 3) parts(2).toInt else 0
      DockerVersion(major, minor, patch)
    } catch {
      case NonFatal(_) => throw new IllegalArgumentException(s"bad docker version $str")
    }
  }

  def fromVersionCommand(str: String): DockerVersion = {
    val pattern(version) = str
    apply(version)
  }
} 
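
A usage sketch for the parser above; the version strings are arbitrary examples:

object DockerVersionDemo extends App {
  val v = DockerVersion("18.09.2")          // DockerVersion(18, 9, 2)
  assert(v > DockerVersion("18.3"))         // missing parts default to 0
  assert(DockerVersion.fromVersionCommand(
    "Docker version 19.03.8, build afacb8b") == DockerVersion(19, 3, 8))
}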
Example 108
Source File: ClassLoaderUtil.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap

import scala.util.control.NonFatal

object ClassLoaderUtil {

  def findClassLoader(baseName: String): ClassLoader = {
    def findCaller(get: Int ⇒ Class[_]): ClassLoader =
      Iterator.from(2).map(get) dropWhile { c ⇒
        c != null &&
          (c.getName.startsWith(baseName) ||
            c.getName.startsWith("scala.Option") ||
            c.getName.startsWith("scala.collection.Iterator") ||
            c.getName.startsWith("ml.combust.bundle.util.ClassLoaderUtil"))
      } next () match {
        case null => getClass.getClassLoader
        case c => c.getClassLoader
      }

    Option(Thread.currentThread.getContextClassLoader) orElse
      (getCallerClass map findCaller) getOrElse
      getClass.getClassLoader
  }

  val getCallerClass: Option[Int ⇒ Class[_]] = {
    try {
      val c = Class.forName("sun.reflect.Reflection")
      val m = c.getMethod("getCallerClass", Array(classOf[Int]): _*)
      Some((i: Int) ⇒ m.invoke(null, Array[AnyRef](i.asInstanceOf[java.lang.Integer]): _*).asInstanceOf[Class[_]])
    } catch {
      case NonFatal(e) ⇒ None
    }
  }
} 
Example 109
Source File: TestingSupport.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.sbt

import scala.util.control.NonFatal
import scala.util.{ Failure, Try }

object TestingSupport {
  def test[T](l: String)(body: => Unit): Try[Unit] =
    Try {
      body
      println(s"${green("+")} $l")
    }.recoverWith {
      case NonFatal(e) =>
        println(s"${red("-")} $l: ${e.getMessage}")
        e.printStackTrace()
        Failure(e)
    }

  def run(tests: Try[Unit]*) = {
    val failed       = tests.count(_.isFailure)
    val successful   = tests.count(_.isSuccess)
    val failedCount  = if (failed > 0) red(s"failed: $failed") else s"failed: $failed"
    val successCount = if (successful > 0) green(s"successful: $successful") else s"successful: $successful"
    println(s"Summary: $failedCount, $successCount")
    if (failed > 0)
      throw new AssertionError(s"$failed tests failed")
  }

  def assertEquals(what: String, actual: => Any, expected: Any) =
    assert(actual == expected, s"$what:\n  expected: `$expected`\n  actual  : `$actual`")

  def colored(code: String)(str: String) = s"$code$str${Console.RESET}"
  lazy val red                           = colored(Console.RED) _
  lazy val green                         = colored(Console.GREEN) _
  lazy val cyan                          = colored(Console.CYAN) _
  lazy val blue                          = colored(Console.BLUE) _

  def reset(str: String) =
    s"${Console.RESET}$str"
} 
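
A hypothetical driver for the harness above; `run` throws an `AssertionError` if any test fails:

import zio.test.sbt.TestingSupport._

object TestingSupportDemo extends App {
  run(
    test("addition")(assertEquals("sum", 1 + 1, 2)),
    test("concatenation")(assertEquals("concat", "a" + "b", "ab"))
  )
}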
Example 110
Source File: HttpBackend.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.http

import scala.concurrent.ExecutionContext
import scala.language.higherKinds
import scala.util.control.NonFatal


trait HttpBackend[Req, Resp, F[_]] { self =>

  type Filter  = HttpFilter[Req, Resp, F]
  type Context = HttpContext[Req, Resp, F]

  protected implicit val httpRequestAdapter: HttpRequestAdapter[Req]

  def newResponse(status: HttpStatus, content: String = ""): Resp

  def toFuture[A](a: A): F[A]
  // Convert Scala's Future into the this backend's Future
  def toFuture[A](a: scala.concurrent.Future[A], e: ExecutionContext): F[A]
  def toScalaFuture[A](a: F[A]): scala.concurrent.Future[A]
  def wrapException(e: Throwable): F[Resp]
  def rescue(body: => F[Resp]): F[Resp] = {
    try {
      body
    } catch {
      case NonFatal(e) => wrapException(e)
    }
  }

  def isFutureType(x: Class[_]): Boolean
  def isScalaFutureType(x: Class[_]): Boolean = {
    classOf[scala.concurrent.Future[_]].isAssignableFrom(x)
  }
  // Returns true if the given class is the natively supported response type in this backend
  def isRawResponseType(x: Class[_]): Boolean

  // Map Future[A] into Future[B]
  def mapF[A, B](f: F[A], body: A => B): F[B]

  // Create a new Filter for this backend
  def newFilter(body: (Req, HttpContext[Req, Resp, F]) => F[Resp]): Filter = {
    HttpFilter.newFilter[Req, Resp, F](self, body)
  }
  // Create a new default filter just for processing preceding filters
  def defaultFilter: Filter = HttpFilter.defaultFilter(self)

  // Create a new default context that process the given request
  def newContext(body: Req => F[Resp]): Context = HttpContext.newContext[Req, Resp, F](self, body)

  // Prepare a thread-local holder for passing parameter values
  def withThreadLocalStore(request: => F[Resp]): F[Resp]

  // Set a thread-local context parameter value
  def setThreadLocal[A](key: String, value: A): Unit

  // Get a thread-local context parameter
  def getThreadLocal[A](key: String): Option[A]
}

object HttpBackend {
  // Pre-defined keys for the thread-local storage
  private[http] val TLS_KEY_RPC              = "rpc"
  private[http] val TLS_KEY_SERVER_EXCEPTION = "server_exception"
} 
Example 111
Source File: Control.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.control
import scala.util.control.NonFatal


object Control {
  def withResource[R <: AutoCloseable, U](resource: R)(body: R => U): U = {
    try {
      body(resource)
    } finally {
      if (resource != null) {
        resource.close()
      }
    }
  }

  def withResources[R1 <: AutoCloseable, R2 <: AutoCloseable, U](resource1: R1, resource2: R2)(
      body: (R1, R2) => U
  ): U = {
    try {
      body(resource1, resource2)
    } finally {
      closeResources(resource1, resource2)
    }
  }

  def closeResources[R <: AutoCloseable](resources: R*): Unit = {
    if (resources != null) {
      var exceptionList = List.empty[Throwable]
      resources.map { x =>
        try {
          if (x != null) {
            x.close()
          }
        } catch {
          case NonFatal(e) =>
            exceptionList = e :: exceptionList
        }
      }
      if (exceptionList.nonEmpty) {
        throw MultipleExceptions(exceptionList)
      }
    }
  }
} 
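
A usage sketch for `withResource` above (the demo object and file path are arbitrary); the reader is closed whether or not the body throws:

import java.io.{BufferedReader, FileReader}

import wvlet.airframe.control.Control

object ControlDemo extends App {
  // The reader is closed whether or not the body throws.
  val firstLine: String =
    Control.withResource(new BufferedReader(new FileReader("README.md"))) { reader =>
      reader.readLine()
    }
  println(firstLine)
}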
Example 112
Source File: OkHttpRetryInterceptor.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.http.okhttp

import okhttp3.{Interceptor, Response}
import wvlet.airframe.control.ResultClass
import wvlet.airframe.control.Retry.RetryContext
import wvlet.airframe.http.HttpClientMaxRetryException

import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

class OkHttpRetryInterceptor(retry: RetryContext) extends Interceptor {

  private def dispatch(retryContext: RetryContext, chain: Interceptor.Chain): Response = {
    val request = chain.request()

    val response = Try(chain.proceed(request))
    val resultClass = response match {
      case Success(r) =>
        try {
          retryContext.resultClassifier(r)
        } catch {
          case NonFatal(e) =>
            retryContext.errorClassifier(e)
        }
      case Failure(e) =>
        retryContext.errorClassifier(e)
    }

    resultClass match {
      case ResultClass.Succeeded =>
        response.get
      case ResultClass.Failed(isRetryable, cause, extraWait) =>
        if (!retryContext.canContinue) {
          // Reached the max retry
          throw HttpClientMaxRetryException(OkHttpResponseWrapper(response.toOption.orNull), retryContext, cause)
        } else if (!isRetryable) {
          // Non-retryable failure
          throw cause
        } else {
          // Update the retry count
          val nextRetryContext = retryContext.withExtraWait(extraWait).nextRetry(cause)
          // Wait until the next retry
          Thread.sleep(nextRetryContext.nextWaitMillis)
          // Run the same request again
          dispatch(nextRetryContext, chain)
        }
    }
  }

  override def intercept(chain: Interceptor.Chain): Response = {
    val request      = chain.request()
    val retryContext = retry.init(Option(request))
    dispatch(retryContext, chain)
  }
} 
Example 113
Source File: Authenticator.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.access.authenticator

import com.typesafe.scalalogging.StrictLogging
import org.coursera.common.concurrent.Futures
import org.coursera.naptime.NaptimeActionException
import org.coursera.naptime.access.authenticator.combiner.And
import org.coursera.naptime.access.authenticator.combiner.AnyOf
import org.coursera.naptime.access.authenticator.combiner.FirstOf
import play.api.http.Status.FORBIDDEN
import play.api.http.Status.UNAUTHORIZED
import play.api.mvc.RequestHeader

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.control.NonFatal


trait Authenticator[+A] {
  def maybeAuthenticate(requestHeader: RequestHeader)(
      implicit ec: ExecutionContext): Future[Option[Either[NaptimeActionException, A]]]

  def collect[B](f: PartialFunction[A, B]): Authenticator[B] = {
    val self = this
    new Authenticator[B] {
      override def maybeAuthenticate(requestHeader: RequestHeader)(
          implicit ec: ExecutionContext): Future[Option[Either[NaptimeActionException, B]]] = {

        Futures
          .safelyCall(self.maybeAuthenticate(requestHeader))
          .map(_.map(_.right.map(f.lift)))
          .map {
            case Some(Right(None))    => None
            case Some(Right(Some(b))) => Some(Right(b))
            case Some(Left(error))    => Some(Left(error))
            case None                 => None
          }
          .recover(Authenticator.errorRecovery)
      }
    }
  }

  def map[B](f: A => B): Authenticator[B] = collect(PartialFunction(f))

}

object Authenticator extends StrictLogging with AnyOf with FirstOf with And {

  def apply[P, A](
      parser: HeaderAuthenticationParser[P],
      decorator: Decorator[P, A]): Authenticator[A] = {

    new Authenticator[A] {
      def maybeAuthenticate(requestHeader: RequestHeader)(
          implicit ec: ExecutionContext): Future[Option[Either[NaptimeActionException, A]]] = {

        parser.parseHeader(requestHeader) match {
          case ParseResult.Success(parsed) =>
            Futures
              .safelyCall(decorator(parsed))
              .map { either =>
                either.left
                  .map { message =>
                    Some(Left(NaptimeActionException(FORBIDDEN, None, Some(message))))
                  }
                  .right
                  .map { decorated =>
                    Some(Right(decorated))
                  }
                  .merge
              }
              .recover(errorRecovery)
          case ParseResult.Error(message, status) =>
            Future.successful(
              Some(Left(NaptimeActionException(status, Some("auth.parse"), Some(message)))))
          case ParseResult.Skip => Future.successful(None)
        }
      }
    }

  }

  private[access] def authenticateAndRecover[A](
      authenticator: Authenticator[A],
      requestHeader: RequestHeader)(
      implicit ec: ExecutionContext): Future[Option[Either[NaptimeActionException, A]]] = {
    Futures
      .safelyCall(authenticator.maybeAuthenticate(requestHeader))
      .recover(errorRecovery)
  }

  def errorRecovery[A]: PartialFunction[Throwable, Option[Either[NaptimeActionException, A]]] = {
    case NonFatal(e) =>
      logger.error("Unexpected authentication error", e)
      val message = s"Unexpected authentication error: ${e.getMessage}"
      Some(Left(NaptimeActionException(UNAUTHORIZED, Some("auth.perms"), Some(message))))
  }

} 
Example 114
Source File: AttributesProvider.scala    From naptime   with Apache License 2.0
package org.coursera.naptime.router2

import com.typesafe.scalalogging.StrictLogging
import org.coursera.courier.templates.DataTemplates.DataConversion
import org.coursera.naptime.courier.CourierFormats
import org.coursera.naptime.schema.Attribute
import org.coursera.naptime.schema.JsValue
import play.api.libs.json.JsError
import play.api.libs.json.JsObject
import play.api.libs.json.JsSuccess
import play.api.libs.json.Json

import scala.util.control.NonFatal

object AttributesProvider extends StrictLogging {

  val SCALADOC_ATTRIBUTE_NAME = "scaladocs"

  lazy val scaladocs: Map[String, JsObject] = {
    val scaladocPath = "/naptime.scaladoc.json"
    (for {
      stream <- Option(getClass.getResourceAsStream(scaladocPath))
      json <- try {
        Some(Json.parse(stream))
      } catch {
        case NonFatal(exception) =>
          logger.warn(
            s"Could not parse contents of file " +
              s"$scaladocPath as JSON")
          None
      } finally {
        stream.close()
      }
      scaladocCollection <- json.validate[Map[String, JsObject]] match {
        case JsSuccess(deserialized, _) =>
          Some(deserialized)
        case JsError(_) =>
          logger.warn(
            s"Could not deserialize contents of file " +
              s"$scaladocPath as `Map[String, JsObject]`")
          None
      }
    } yield {
      scaladocCollection
    }).getOrElse(Map.empty)
  }

  def getResourceAttributes(className: String): Seq[Attribute] = {
    scaladocs
      .get(className)
      .map(value => Attribute(SCALADOC_ATTRIBUTE_NAME, Some(jsObjToJsValue(value))))
      .toList
  }

  def getMethodAttributes(className: String, methodName: String): Seq[Attribute] = {
    scaladocs
      .get(s"$className.$methodName")
      .map(value => Attribute(SCALADOC_ATTRIBUTE_NAME, Some(jsObjToJsValue(value))))
      .toList
  }

  private[this] def jsObjToJsValue(jsObj: JsObject): JsValue = {
    JsValue.build(CourierFormats.objToDataMap(jsObj), DataConversion.SetReadOnly)
  }
} 
Example 115
Source File: JsonUtils.scala    From azure-event-hubs-spark   with Apache License 2.0
package org.apache.spark.sql.eventhubs

import org.apache.spark.eventhubs.{ NameAndPartition, _ }
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import scala.collection.mutable
import scala.util.control.NonFatal


private object JsonUtils {

  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionSeqNos(jsonStr: String): Map[NameAndPartition, SequenceNumber] = {
    try {
      Serialization.read[Map[String, Map[PartitionId, SequenceNumber]]](jsonStr).flatMap {
        case (name, partSeqNos) =>
          partSeqNos.map {
            case (part, seqNo) =>
              NameAndPartition(name, part) -> seqNo
          }
      }
    } catch {
      case NonFatal(_) =>
        throw new IllegalArgumentException(
          s"failed to parse $jsonStr" +
            s"""Expected e.g. {"ehName":{"0":23,"1":-1},"ehNameB":{"0":-2}}""")
    }
  }
} 
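A quick round-trip sketch; since the reconstructed object above is private, this only works from within the same package. The input format matches the one shown in the error message:

// from within org.apache.spark.sql.eventhubs:
// JsonUtils.partitionSeqNos("""{"ehName":{"0":23,"1":-1}}""")
// => Map(NameAndPartition("ehName", 0) -> 23, NameAndPartition("ehName", 1) -> -1)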
Example 116
Source File: Main.scala    From perf_tester   with Apache License 2.0
package akka

import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Actor
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.ActorRef
import scala.util.control.NonFatal


object Main {

  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("you need to provide exactly one argument: the class of the application supervisor actor")
    } else {
      val system = ActorSystem("Main")
      try {
        val appClass = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](args(0)).get
        val app = system.actorOf(Props(appClass), "app")
        val terminator = system.actorOf(Props(classOf[Terminator], app), "app-terminator")
      } catch {
        case NonFatal(e) ⇒ system.terminate(); throw e
      }
    }
  }

  class Terminator(app: ActorRef) extends Actor with ActorLogging {
    context watch app
    def receive = {
      case Terminated(_) ⇒
        log.info("application supervisor has terminated, shutting down")
        context.system.terminate()
    }
  }

} 
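To try it, pass the fully qualified class name of a supervisor actor on the command line; the actor class below is hypothetical:

// class HelloApp extends Actor { def receive = { case msg => println(msg) } }
// launched with:
//   java -cp <classpath> akka.Main com.example.HelloApp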
Example 117
Source File: FutureTimeoutSupport.scala    From perf_tester   with Apache License 2.0
package akka.pattern

import scala.concurrent.{ ExecutionContext, Promise, Future }
import akka.actor._
import scala.util.control.NonFatal
import scala.concurrent.duration.FiniteDuration
import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture
import akka.dispatch.Futures
import java.util.function.BiConsumer

trait FutureTimeoutSupport {
  
  def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: ⇒ CompletionStage[T])(implicit ec: ExecutionContext): CompletionStage[T] =
    if (duration.isFinite() && duration.length < 1) {
      try value catch { case NonFatal(t) ⇒ Futures.failedCompletionStage(t) }
    } else {
      val p = new CompletableFuture[T]
      using.scheduleOnce(duration) {
        try {
          val future = value
          future.whenComplete(new BiConsumer[T, Throwable] {
            override def accept(t: T, ex: Throwable): Unit = {
              if (t != null) p.complete(t)
              if (ex != null) p.completeExceptionally(ex)
            }
          })
        } catch {
          case NonFatal(ex) ⇒ p.completeExceptionally(ex)
        }
      }
      p
    }
} 
Example 118
Source File: SerializedSuspendableExecutionContext.scala    From perf_tester   with Apache License 2.0
package akka.util

import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
import scala.annotation.{ tailrec, switch }
import akka.dispatch.AbstractNodeQueue

private[akka] object SerializedSuspendableExecutionContext {
  final val Off = 0
  final val On = 1
  final val Suspended = 2

  def apply(throughput: Int)(implicit context: ExecutionContext): SerializedSuspendableExecutionContext =
    new SerializedSuspendableExecutionContext(throughput)(context match {
      case s: SerializedSuspendableExecutionContext ⇒ s.context
      case other ⇒ other
    })
}


private[akka] final class SerializedSuspendableExecutionContext(throughput: Int)(val context: ExecutionContext)
  extends AbstractNodeQueue[Runnable] with Runnable with ExecutionContext {
  import SerializedSuspendableExecutionContext._

  private val state = new AtomicInteger(Off)

  // (suspend/resume and task-execution members of the original class are elided in this excerpt)

  final def size(): Int = count()

  override final def toString: String = (state.get: @switch) match {
    case 0 ⇒ "Off"
    case 1 ⇒ "On"
    case 2 ⇒ "Off & Suspended"
    case 3 ⇒ "On & Suspended"
  }
} 
Example 119
Source File: UdpSender.scala    From perf_tester   with Apache License 2.0
package akka.io

import java.nio.channels.DatagramChannel
import scala.collection.immutable
import scala.util.control.NonFatal
import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics }
import akka.io.Inet.SocketOption
import akka.io.Udp._
import akka.actor._


private[io] class UdpSender(
  val udp: UdpExt,
  channelRegistry: ChannelRegistry,
  commander: ActorRef,
  options: immutable.Traversable[SocketOption])
  extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] {

  val channel = {
    val datagramChannel = DatagramChannel.open
    datagramChannel.configureBlocking(false)
    val socket = datagramChannel.socket
    options foreach { _.beforeDatagramBind(socket) }

    datagramChannel
  }
  channelRegistry.register(channel, initialOps = 0)

  def receive: Receive = {
    case registration: ChannelRegistration ⇒
      options.foreach {
        case v2: Inet.SocketOptionV2 ⇒ v2.afterConnect(channel.socket)
        case _ ⇒
      }
      commander ! SimpleSenderReady
      context.become(sendHandlers(registration))
  }

  override def postStop(): Unit = if (channel.isOpen) {
    log.debug("Closing DatagramChannel after being stopped")
    try channel.close()
    catch {
      case NonFatal(e) ⇒ log.debug("Error closing DatagramChannel: {}", e)
    }
  }
} 
Example 120
Source File: IAMClient.scala    From play-zhewbacca   with MIT License
package org.zalando.zhewbacca

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.{Inject, Singleton}

import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker
import org.zalando.zhewbacca.metrics.PluggableMetrics
import play.api.http.Status._
import play.api.libs.ws.WSClient
import play.api.{Configuration, Logger}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

import atmos.dsl._
import atmos.dsl.Slf4jSupport._


@Singleton
class IAMClient @Inject() (
    config: Configuration,
    pluggableMetrics: PluggableMetrics,
    ws: WSClient,
    actorSystem: ActorSystem,
    implicit val ec: ExecutionContext) extends (OAuth2Token => Future[Option[TokenInfo]]) {

  val logger: Logger = Logger("security.IAMClient")

  val METRICS_BREAKER_CLOSED = 0
  val METRICS_BREAKER_OPEN = 1
  val circuitStatus = new AtomicInteger()

  pluggableMetrics.gauge {
    circuitStatus.get
  }

  val authEndpoint: String = config.getOptional[String]("authorisation.iam.endpoint").getOrElse(
    throw new IllegalArgumentException("Authorisation: IAM endpoint is not configured"))

  val breakerMaxFailures: Int = config.getOptional[Int]("authorisation.iam.cb.maxFailures").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker max failures is not configured"))

  val breakerCallTimeout: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.cb.callTimeout").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker call timeout is not configured"))

  val breakerResetTimeout: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.cb.resetTimeout").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker reset timeout is not configured"))

  val breakerMaxRetries: TerminationPolicy = config.getOptional[Int]("authorisation.iam.maxRetries").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker max retries is not configured")).attempts

  val breakerRetryBackoff: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.retry.backoff.duration").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker the duration of exponential backoff is not configured"))

  lazy val breaker: CircuitBreaker = new CircuitBreaker(
    actorSystem.scheduler,
    breakerMaxFailures,
    breakerCallTimeout,
    breakerResetTimeout).onHalfOpen {
    circuitStatus.set(METRICS_BREAKER_OPEN)
  }.onOpen {
    circuitStatus.set(METRICS_BREAKER_OPEN)
  }.onClose {
    circuitStatus.set(METRICS_BREAKER_CLOSED)
  }

  implicit val retryRecover = retryFor { breakerMaxRetries } using {
    exponentialBackoff { breakerRetryBackoff }
  } monitorWith {
    logger.logger onRetrying logNothing onInterrupted logWarning onAborted logError
  }

  override def apply(token: OAuth2Token): Future[Option[TokenInfo]] = {
    breaker.withCircuitBreaker(
      pluggableMetrics.timing(
        retryAsync(s"Calling $authEndpoint") {
          ws.url(authEndpoint).withQueryStringParameters(("access_token", token.value)).get()
        })).map { response =>
        response.status match {
          case OK => Some(response.json.as[TokenInfo])
          case _ => None
        }
      } recover {
        case NonFatal(e) =>
          logger.error(s"Exception occurred during validation of token '${token.toSafeString}': $e")
          None // consider any exception as invalid token
      }
  }

} 
Example 121
Source File: RequestValidator.scala    From play-zhewbacca   with MIT License
package org.zalando.zhewbacca

import play.api.Logger
import play.api.mvc._

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

private[zhewbacca] object RequestValidator {

  val logger: Logger = Logger(this.getClass)

  def validate[A](scope: Scope, requestHeader: RequestHeader, authProvider: AuthProvider)(implicit ec: ExecutionContext): Future[Either[Result, TokenInfo]] = {
    authProvider.valid(OAuth2Token.from(requestHeader), scope).map {
      case AuthTokenValid(tokenInfo) => Right(tokenInfo)
      case AuthTokenInvalid => Left(Results.Unauthorized)
      case AuthTokenEmpty => Left(Results.Unauthorized)
      case AuthTokenInsufficient => Left(Results.Forbidden)
    } recover {
      case NonFatal(e) =>
        logger.error(e.getMessage, e)
        logger.debug("Request unauthorized because of failure in Authentication Provider")
        Left(Results.Unauthorized)
    }
  }
} 
Example 122
Source File: AkkaHttpClientEndpointsTest.scala    From endpoints4s   with MIT License
package endpoints4s.akkahttp.client

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import endpoints4s.algebra
import endpoints4s.algebra.ChunkedJsonEntitiesTestApi

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

class TestClient(settings: EndpointsSettings)(implicit
    EC: ExecutionContext,
    M: Materializer
) extends Endpoints(settings)
    with BasicAuthentication
    with algebra.EndpointsTestApi
    with algebra.BasicAuthenticationTestApi
    with algebra.TextEntitiesTestApi
    with algebra.JsonFromCodecTestApi
    with algebra.SumTypedEntitiesTestApi
    with algebra.circe.JsonFromCirceCodecTestApi
    with JsonEntitiesFromCodecs
    with algebra.circe.JsonEntitiesFromCodecs
    with ChunkedJsonEntities
    with ChunkedJsonEntitiesTestApi
    with algebra.circe.ChunkedJsonEntitiesTestApi

class AkkaHttpClientEndpointsTest
    extends algebra.client.EndpointsTestSuite[TestClient]
    with algebra.client.BasicAuthTestSuite[TestClient]
    with algebra.client.JsonFromCodecTestSuite[TestClient]
    with algebra.client.TextEntitiesTestSuite[TestClient]
    with algebra.client.SumTypedEntitiesTestSuite[TestClient]
    with algebra.client.ChunkedJsonEntitiesTestSuite[TestClient] {

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val client: TestClient = new TestClient(
    EndpointsSettings(
      AkkaHttpRequestExecutor
        .cachedHostConnectionPool("localhost", wiremockPort)
    )
  )

  val streamingClient: TestClient = new TestClient(
    EndpointsSettings(
      AkkaHttpRequestExecutor
        .cachedHostConnectionPool("localhost", streamingPort)
    )
  )

  def call[Req, Resp](
      endpoint: client.Endpoint[Req, Resp],
      args: Req
  ): Future[Resp] = endpoint(args)

  def encodeUrl[A](url: client.Url[A])(a: A): String = url.encode(a)

  def callStreamedEndpoint[A, B](
      endpoint: streamingClient.Endpoint[A, streamingClient.Chunks[B]],
      req: A
  ): Future[Seq[Either[String, B]]] =
    Source
      .futureSource(endpoint(req))
      .map(Right(_))
      .recover { case NonFatal(t) => Left(t.toString) }
      .runWith(Sink.seq)

  def callStreamedEndpoint[A, B](
      endpoint: streamingClient.Endpoint[streamingClient.Chunks[A], B],
      req: Source[A, _]
  ): Future[B] =
    endpoint(req)

  clientTestSuite()
  basicAuthSuite()
  jsonFromCodecTestSuite()
  textEntitiesTestSuite()
  sumTypedRequestsTestSuite()

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

} 
Example 123
Source File: SplashUtils.scala    From splash   with Apache License 2.0
package org.apache.spark.shuffle

import java.io.{InputStream, OutputStream}
import java.util.Comparator

import org.apache.spark.internal.Logging

import scala.util.control.NonFatal

object SplashUtils extends Logging {
  def withResources[T <: AutoCloseable, V](r: => T)(f: T => V): V = {
    val resource: T = r
    require(resource != null, "resource is null")
    var exception: Throwable = null
    try {
      f(resource)
    } catch {
      case NonFatal(e) =>
        exception = e
        throw e
      case e: Throwable =>
        logError("fatal error received.", e)
        throw e
    } finally {
      closeAndAddSuppressed(exception, resource)
    }
  }

  private def closeAndAddSuppressed(e: Throwable,
      resource: AutoCloseable): Unit = {
    if (e != null) {
      try {
        resource.close()
      } catch {
        case NonFatal(suppressed) =>
          e.addSuppressed(suppressed)
      }
    } else {
      resource.close()
    }
  }

  
  // hash function used by the comparator below; null keys hash to 0
  def hash[T](obj: T): Int =
    if (obj == null) 0 else obj.hashCode()
}

class SplashHashComparator[K] extends Comparator[K] {
  def compare(key1: K, key2: K): Int = {
    val hash1 = SplashUtils.hash(key1)
    val hash2 = SplashUtils.hash(key2)
    if (hash1 < hash2) -1 else if (hash1 == hash2) 0 else 1
  }
}


class SplashSpillableIterator[T](var upstream: Iterator[T],
    val spillInMemoryIterator: Iterator[T] => SpilledFile,
    val getNextUpstream: SpilledFile => Iterator[T])
    extends Iterator[T] with Logging {
  private val spillLock = new Object
  private var spilledFileOpt: Option[SpilledFile] = None
  private var cur: T = readNext()

  def spill(): Option[SpilledFile] = spillLock.synchronized {
    spilledFileOpt match {
      case Some(_) =>
        // has spilled, return None
        None
      case None =>
        // never spilled, now spilling
        val spilledFile = spillInMemoryIterator(upstream)
        spilledFileOpt = Some(spilledFile)
        spilledFileOpt
    }
  }

  def readNext(): T = spillLock.synchronized {
    spilledFileOpt match {
      case Some(spilledFile) =>
        upstream = getNextUpstream(spilledFile)
        spilledFileOpt = None
      case None =>
      // do nothing
    }
    if (upstream.hasNext) {
      upstream.next()
    } else {
      null.asInstanceOf[T]
    }
  }

  override def hasNext: Boolean = cur != null

  override def next(): T = {
    val ret = cur
    cur = readNext()
    ret
  }
} 
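A small usage sketch of withResources; the file name is illustrative. The resource is closed even if the body throws, and a close-time failure is attached to the original exception as suppressed:

import java.io.FileInputStream
import org.apache.spark.shuffle.SplashUtils

val firstByte: Int = SplashUtils.withResources(new FileInputStream("/tmp/data.bin")) { in =>
  in.read()
}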
Example 124
Source File: either.scala    From freestyle   with Apache License 2.0
package freestyle.tagless
package effects

import cats.{Eval, MonadError}

import scala.util.control.NonFatal

object either {

  final class ErrorProvider[E] {

    @tagless(true) sealed trait EitherM {
      def either[A](fa: Either[E, A]): FS[A]
      def error[A](e: E): FS[A]
      def catchNonFatal[A](a: Eval[A], f: Throwable => E): FS[A]
    }

    trait Implicits {
      implicit def freeStyleEitherMHandler[M[_]](
          implicit ME: MonadError[M, E]): EitherM.Handler[M] = new EitherM.Handler[M] {
        def either[A](fa: Either[E, A]): M[A] = ME.fromEither(fa)
        def error[A](e: E): M[A]              = ME.raiseError[A](e)
        def catchNonFatal[A](a: Eval[A], f: Throwable => E): M[A] =
          try ME.pure(a.value)
          catch {
            case NonFatal(e) => ME.raiseError(f(e))
          }
      }

      implicit class EitherFSLift[A](fa: Either[E, A])  {
        def liftF[F[_]: EitherM]: F[A] = EitherM[F].either(fa)
      }
    }

    object implicits extends Implicits
  }

  def apply[E]: ErrorProvider[E] = new ErrorProvider
} 
Example 125
Source File: ResponseParser.scala    From scala-stellar-sdk   with Apache License 2.0
package stellar.sdk.model.response

import org.json4s.native.JsonMethods.{pretty, render}
import org.json4s.{CustomSerializer, JObject}

import scala.util.control.NonFatal

class ResponseParser[T](f: JObject => T)(implicit m: Manifest[T]) extends CustomSerializer[T](_ => ({
  case o: JObject =>
    try {
      f(o)
    } catch {
      case NonFatal(t) => throw ResponseParseException(pretty(render(o)), t)
    }
}, PartialFunction.empty))

case class ResponseParseException(doc: String, cause: Throwable)
  extends Exception(s"Unable to parse document:\n$doc", cause) 
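A sketch of defining a parser for a made-up response type; any non-fatal extraction failure surfaces as ResponseParseException with the offending document attached:

import org.json4s._
import stellar.sdk.model.response.ResponseParser

case class Pong(status: String) // hypothetical response type

implicit val formats: Formats = DefaultFormats
val pongParser = new ResponseParser[Pong](o => Pong((o \ "status").extract[String]))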
Example 126
Source File: ReflectionHelper.scala    From spark-atlas-connector   with Apache License 2.0
package com.hortonworks.spark.atlas.utils

import scala.util.control.NonFatal

object ReflectionHelper extends Logging {
  import scala.reflect.runtime.universe.{TermName, runtimeMirror, typeOf, TypeTag}
  private val currentMirror = runtimeMirror(getClass.getClassLoader)

  def reflectField[T, OUT](obj: Any, fieldName: String)(implicit ttag: TypeTag[T]): Option[OUT] = {
    val relMirror = currentMirror.reflect(obj)

    try {
      val method = typeOf[T].decl(TermName(fieldName)).asTerm.accessed.asTerm

      Some(relMirror.reflectField(method).get.asInstanceOf[OUT])
    } catch {
      case NonFatal(_) =>
        logWarn(s"Failed to reflect field $fieldName from $obj. " +
          s"Maybe missing to apply necessary patch?")
        None
    }
  }

  def reflectFieldWithContextClassloaderLoosenType(obj: Any, fieldName: String): Option[Any] = {
    val typeMirror = runtimeMirror(Thread.currentThread().getContextClassLoader)
    val instanceMirror = typeMirror.reflect(obj)

    val members = instanceMirror.symbol.typeSignature.members
    val field = members.find(_.name.decodedName.toString == fieldName)
    field match {
      case Some(f) =>
        try {
          Some(instanceMirror.reflectField(f.asTerm).get)
        } catch {
          case NonFatal(e) =>
            logWarn(s"Failed to reflect field $fieldName from $obj. " +
              s"Maybe missing to apply necessary patch? $e")
            None
        }

      case None =>
        logWarn(s"Failed to reflect field $fieldName from $obj. " +
          s"Maybe missing to apply necessary patch?")
        None
    }
  }

  def reflectFieldWithContextClassloader[OUT](obj: Any, fieldName: String): Option[OUT] = {
    reflectFieldWithContextClassloaderLoosenType(obj, fieldName).map(_.asInstanceOf[OUT])
  }

  def reflectMethodWithContextClassloaderLoosenType(
      obj: Any,
      methodName: String,
      params: Any*): Option[Any] = {
    val typeMirror = runtimeMirror(Thread.currentThread().getContextClassLoader)
    val instanceMirror = typeMirror.reflect(obj)

    val members = instanceMirror.symbol.typeSignature.members
    val method = members.find(_.name.decodedName.toString == methodName)
    method match {
      case Some(f) =>
        try {
          Some(instanceMirror.reflectMethod(f.asMethod).apply(params: _*))
        } catch {
          case NonFatal(_) =>
            logWarn(s"Failed to call method $methodName from $obj via reflection. " +
              s"Maybe missing to apply necessary patch?")
            None
        }

      case None =>
        logWarn(s"Failed to call method $methodName from $obj via reflection. " +
          s"Maybe missing to apply necessary patch?")
        None
    }
  }

  def reflectMethodWithContextClassloader[OUT](
      obj: Any,
      fieldName: String,
      params: Any*): Option[OUT] = {
    reflectMethodWithContextClassloaderLoosenType(obj, fieldName, params: _*)
      .map(_.asInstanceOf[OUT])
  }

  def classForName(className: String): Class[_] = {
    Class.forName(className, true, getContextOrClassClassLoader)
  }

  private def getContextOrClassClassLoader: ClassLoader =
    Option(Thread.currentThread().getContextClassLoader).getOrElse(getClass.getClassLoader)
} 
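A hedged usage sketch of the context-classloader field helper; the Box type is made up for illustration:

import com.hortonworks.spark.atlas.utils.ReflectionHelper

case class Box(secret: String) // made-up type for illustration

val value = ReflectionHelper.reflectFieldWithContextClassloader[String](Box("hi"), "secret")
// expected: Some("hi"); a missing field logs a warning and returns None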
Example 127
Source File: AbstractEventProcessor.scala    From spark-atlas-connector   with Apache License 2.0
package com.hortonworks.spark.atlas

import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}

import scala.reflect.ClassTag
import scala.util.control.NonFatal
import com.google.common.annotations.VisibleForTesting
import com.hortonworks.spark.atlas.utils.Logging

abstract class AbstractEventProcessor[T: ClassTag] extends Logging {
  def conf: AtlasClientConf

  private val capacity = conf.get(AtlasClientConf.BLOCKING_QUEUE_CAPACITY).toInt

  private[atlas] val eventQueue = new LinkedBlockingQueue[T](capacity)

  private val timeout = conf.get(AtlasClientConf.BLOCKING_QUEUE_PUT_TIMEOUT).toInt

  private val eventProcessThread = new Thread {
    override def run(): Unit = {
      eventProcess()
    }
  }

  def pushEvent(event: T): Unit = {
    event match {
      case e: T =>
        if (!eventQueue.offer(e, timeout, TimeUnit.MILLISECONDS)) {
          logError(s"Fail to put event $e into queue within time limit $timeout, will throw it")
        }
      case _ => // Ignore other events
    }
  }

  def startThread(): Unit = {
    eventProcessThread.setName(this.getClass.getSimpleName + "-thread")
    eventProcessThread.setDaemon(true)

    val ctxClassLoader = Thread.currentThread().getContextClassLoader
    if (ctxClassLoader != null && getClass.getClassLoader != ctxClassLoader) {
      eventProcessThread.setContextClassLoader(ctxClassLoader)
    }

    eventProcessThread.start()
  }

  protected def process(e: T): Unit

  @VisibleForTesting
  private[atlas] def eventProcess(): Unit = {
    var stopped = false
    while (!stopped) {
      try {
        Option(eventQueue.poll(3000, TimeUnit.MILLISECONDS)).foreach { e =>
          process(e)
        }
      } catch {
        case _: InterruptedException =>
          logDebug("Thread is interrupted")
          stopped = true

        case NonFatal(f) =>
          logWarn(s"Caught exception during parsing event", f)
      }
    }
  }
} 
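A minimal concrete subclass sketch, assuming an AtlasClientConf instance is available; events pushed onto the queue are handled by process() on the daemon thread started by startThread():

import com.hortonworks.spark.atlas.AtlasClientConf

// made-up processor that just prints each event
class PrintingProcessor(val conf: AtlasClientConf) extends AbstractEventProcessor[String] {
  override protected def process(e: String): Unit = println(s"processing $e")
}

// val p = new PrintingProcessor(conf)  // given some AtlasClientConf
// p.startThread()                      // starts the polling daemon thread
// p.pushEvent("hello")                 // enqueued, then handled by process()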
Example 128
Source File: UnifiedSparkListener.scala    From spark-monitoring   with MIT License
package org.apache.spark.listeners

import java.time.Instant

import org.apache.spark.{SparkConf, SparkException, SparkInformation}
import org.apache.spark.internal.Logging
import org.apache.spark.listeners.sink.SparkListenerSink
import org.apache.spark.scheduler._
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.util.JsonProtocol
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, render}

import scala.util.control.NonFatal


class UnifiedSparkListener(override val conf: SparkConf)
  extends UnifiedSparkListenerHandler
    with Logging
    with SparkListenerHandlers
    with StreamingListenerHandlers
    with StreamingQueryListenerHandlers {

  private val listenerSink = this.createSink(this.conf)

  override def onOtherEvent(event: SparkListenerEvent): Unit = {
    // All events in Spark that are not specific to SparkListener go through
    // this method.  The typed ListenerBus implementations intercept and forward to
    // their "local" listeners.
    // We will just handle everything here so we only have to have one listener.
    // The advantage is that this can be registered in extraListeners, so no
    // code change is required to add listener support.
    event match {
      // We will use the ClassTag for the private wrapper class to match
      case this.streamingListenerEventClassTag(e) =>
        this.onStreamingListenerEvent(e)
      case streamingQueryListenerEvent: StreamingQueryListener.Event =>
        this.onStreamingQueryListenerEvent(streamingQueryListenerEvent)
      case sparkListenerEvent: SparkListenerEvent => if (sparkListenerEvent.logEvent) {
        logSparkListenerEvent(sparkListenerEvent)
      }
    }
  }

  private def createSink(conf: SparkConf): SparkListenerSink = {
    val sink = conf.getOption("spark.unifiedListener.sink") match {
      case Some(listenerSinkClassName) => listenerSinkClassName
      case None => throw new SparkException("spark.unifiedListener.sink setting is required")
    }
    logInfo(s"Creating listener sink: ${sink}")
    org.apache.spark.util.Utils.loadExtensions(
      classOf[SparkListenerSink],
      Seq(sink),
      conf).head
  }

  protected def logSparkListenerEvent(
                                       event: SparkListenerEvent,
                                       getTimestamp: () => Instant =
                                       () => Instant.now()): Unit = {
    val json = try {
      // Add a well-known time field.
      Some(
        JsonProtocol.sparkEventToJson(event)
          .merge(render(
            SparkInformation.get() + ("SparkEventTime" -> getTimestamp().toString)
          ))
      )
    } catch {
      case NonFatal(e) =>
        logError(s"Error serializing SparkListenerEvent to JSON: $event", e)
        None
    }

    sendToSink(json)
  }

  private[spark] def sendToSink(json: Option[JValue]): Unit = {
    try {
      json match {
        case Some(j) => {
          logDebug(s"Sending event to listener sink: ${compact(j)}")
          this.listenerSink.logEvent(json)
        }
        case None => {
          logWarning("json value was None")
        }
      }
    } catch {
      case NonFatal(e) =>
        logError(s"Error sending to listener sink: $e")
    }
  }
} 
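Because the sink class is loaded reflectively, wiring the listener in is pure configuration; a sketch using the setting name read in createSink above and the sink class from Example 130 below:

// spark-submit \
//   --conf spark.extraListeners=org.apache.spark.listeners.UnifiedSparkListener \
//   --conf spark.unifiedListener.sink=org.apache.spark.listeners.sink.loganalytics.LogAnalyticsListenerSink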
Example 129
Source File: TryWith.scala    From spark-monitoring   with MIT License
package com.microsoft.pnp.util

import scala.util.control.NonFatal
import scala.util.{Failure, Try}

object TryWith {
  def apply[C <: AutoCloseable, R](resource: => C)(f: C => R): Try[R] =
    Try(resource).flatMap(resourceInstance => {
      try {
        val returnValue = f(resourceInstance)
        Try(resourceInstance.close()).map(_ => returnValue)
      }
      catch {
        case NonFatal(exceptionInFunction) =>
          try {
            resourceInstance.close()
            Failure(exceptionInFunction)
          }
          catch {
            case NonFatal(exceptionInClose) =>
              exceptionInFunction.addSuppressed(exceptionInClose)
              Failure(exceptionInFunction)
          }
      }
    })
} 
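Usage mirrors scala.util.Try; a sketch with an illustrative path. Opening, reading, and closing failures all surface in the returned Try:

import scala.io.Source
import com.microsoft.pnp.util.TryWith

val firstLine = TryWith(Source.fromFile("/tmp/app.conf"))(_.getLines().next())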
Example 130
Source File: LogAnalyticsListenerSink.scala    From spark-monitoring   with MIT License
package org.apache.spark.listeners.sink.loganalytics

import com.microsoft.pnp.client.loganalytics.{LogAnalyticsClient, LogAnalyticsSendBufferClient}
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.listeners.sink.SparkListenerSink
import org.json4s.JsonAST
import org.json4s.jackson.JsonMethods.compact

import scala.util.control.NonFatal

class LogAnalyticsListenerSink(conf: SparkConf) extends SparkListenerSink with Logging {
  private val config = new LogAnalyticsListenerSinkConfiguration(conf)


  protected lazy val logAnalyticsBufferedClient = new LogAnalyticsSendBufferClient(
    new LogAnalyticsClient(
      config.workspaceId, config.secret),
    config.logType
  )

  override def logEvent(event: Option[JsonAST.JValue]): Unit = {
    try {
      event match {
        case Some(j) => {
          val jsonString = compact(j)
          logDebug(s"Sending event to Log Analytics: ${jsonString}")
          logAnalyticsBufferedClient.sendMessage(jsonString, "SparkEventTime")
        }
        case None =>
      }
    } catch {
      case NonFatal(e) =>
        logError(s"Error sending to Log Analytics: $e")
    }
  }
} 
Example 131
Source File: package.scala    From incubator-livy   with Apache License 2.0
package org.apache

import java.util.Properties

import scala.util.control.NonFatal

package object livy {

  private object LivyBuildInfo {
    val (
        livyVersion: String,
        livyBuildUser: String,
        livyRevision: String,
        livyBranch: String,
        livyBuildDate: String,
        livyRepo: String
      ) = {
      val unknown = "<unknown>"
      val defaultValue = (unknown, unknown, unknown, unknown, unknown, unknown)
      val resource = Option(Thread.currentThread().getContextClassLoader
        .getResourceAsStream("livy-version-info.properties"))

      try {
        resource.map { r =>
          val properties = new Properties()
          properties.load(r)
          (
            properties.getProperty("version", unknown),
            properties.getProperty("user", unknown),
            properties.getProperty("revision", unknown),
            properties.getProperty("branch", unknown),
            properties.getProperty("date", unknown),
            properties.getProperty("url", unknown)
          )
        }.getOrElse(defaultValue)
      } catch {
        case NonFatal(e) =>
          // swallow the exception
          defaultValue
      } finally {
        try {
          resource.foreach(_.close())
        } catch {
          case NonFatal(e) => // swallow the exception in closing the stream
        }
      }
    }
  }

  val LIVY_VERSION = LivyBuildInfo.livyVersion
  val LIVY_BUILD_USER = LivyBuildInfo.livyBuildUser
  val LIVY_REVISION = LivyBuildInfo.livyRevision
  val LIVY_BRANCH = LivyBuildInfo.livyBranch
  val LIVY_BUILD_DATE = LivyBuildInfo.livyBuildDate
  val LIVY_REPO_URL = LivyBuildInfo.livyRepo
} 
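The values can then be surfaced anywhere, for example:

import org.apache.livy._

println(s"Livy $LIVY_VERSION ($LIVY_REVISION@$LIVY_BRANCH), built by $LIVY_BUILD_USER on $LIVY_BUILD_DATE")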
Example 132
Source File: SQLInterpreter.scala    From incubator-livy   with Apache License 2.0
package org.apache.livy.repl

import java.lang.reflect.InvocationTargetException
import java.sql.Date

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.json4s._
import org.json4s.JsonAST.{JNull, JString}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import org.apache.livy.Logging
import org.apache.livy.rsc.RSCConf
import org.apache.livy.rsc.driver.SparkEntries


class SQLInterpreter(
    sparkConf: SparkConf,
    rscConf: RSCConf,
    sparkEntries: SparkEntries) extends Interpreter with Logging {

  case object DateSerializer extends CustomSerializer[Date](_ => ( {
    case JString(s) => Date.valueOf(s)
    case JNull => null
  }, {
    case d: Date => JString(d.toString)
  }))

  private implicit def formats: Formats = DefaultFormats + DateSerializer

  private var spark: SparkSession = null

  private val maxResult = rscConf.getInt(RSCConf.Entry.SQL_NUM_ROWS)

  override def kind: String = "sql"

  override def start(): Unit = {
    require(!sparkEntries.sc().sc.isStopped)
    spark = sparkEntries.sparkSession()
  }

  override protected[repl] def execute(code: String): Interpreter.ExecuteResponse = {
    try {
      val result = spark.sql(code)
      val schema = parse(result.schema.json)

      // Get the row data
      val rows = result.take(maxResult)
        .map {
          _.toSeq.map {
            // Convert java BigDecimal type to Scala BigDecimal, because current version of
            // Json4s doesn't support java BigDecimal as a primitive type (LIVY-455).
            case i: java.math.BigDecimal => BigDecimal(i)
            case e => e
          }
        }

      val jRows = Extraction.decompose(rows)

      Interpreter.ExecuteSuccess(
        APPLICATION_JSON -> (("schema" -> schema) ~ ("data" -> jRows)))
    } catch {
      case e: InvocationTargetException =>
        warn(s"Fail to execute query $code", e.getTargetException)
        val cause = e.getTargetException
        Interpreter.ExecuteError("Error", cause.getMessage, cause.getStackTrace.map(_.toString))

      case NonFatal(f) =>
        warn(s"Fail to execute query $code", f)
        Interpreter.ExecuteError("Error", f.getMessage, f.getStackTrace.map(_.toString))
    }
  }

  override def close(): Unit = { }
} 
Example 133
Source File: package.scala    From openlaw-core   with Apache License 2.0
package org.adridadou.openlaw

import result.Implicits.RichResult
import cats.data.Validated._
import cats.data.{NonEmptyList, ValidatedNel}
import cats.data.NonEmptyList.{of, one}
import cats.implicits._
import org.adridadou.openlaw.result.Implicits.RichTry
import scala.util.Try
import scala.util.control.NonFatal

package object result {

  type ResultNel[+A] = ValidatedNel[FailureCause, A]

  type Result[+A] = Either[FailureCause, A]
  type Failure[Nothing] = Left[FailureCause, Nothing]
  type Success[+A] = Right[FailureCause, A]

  def attempt[A](f: => A): Result[A] = Try(f).toResult

  def handleFatalErrors(t: Throwable): Result[Nothing] = t match {
    case NonFatal(e: Exception) => Failure(e)
    case e                      => throw e
  }
}

package result {

  object Success {
    def unit: Result[Unit] = Success(())
    def apply[A](a: A): Result[A] = Right(a)
    def unapply[A](result: Result[A]): Option[A] = result.toOption
  }

  object ResultNel {
    def apply[A](nel: NonEmptyList[Result[A]]): ResultNel[NonEmptyList[A]] =
      nel.map(_.toResultNel).sequence
    def apply[A](nel: List[Result[A]]): ResultNel[List[A]] =
      nel.map(_.toResultNel).sequence
  }

  object FailureNel {
    def apply[A](e: FailureCause): ResultNel[A] =
      Invalid[NonEmptyList[FailureCause]](one(e))
    def apply[A](head: FailureCause, tail: FailureCause*): ResultNel[A] =
      Invalid[NonEmptyList[FailureCause]](of(head, tail: _*))
  }

  object Failure {
    def apply[A](f: FailureCause): Result[A] = Left(f)
    def apply[A](): Result[A] = apply(new RuntimeException)
    def apply[A](e: Exception): Result[A] = apply(FailureException(e))
    def apply[A](e: Exception, id: String): Result[A] =
      apply(FailureException(e, Some(id)))
    def apply[A](message: String): Result[A] = apply(FailureMessage(message))
    def apply[A](message: String, id: String): Result[A] =
      apply(FailureMessage(message, Some(id)))
    def apply[A](es: NonEmptyList[Exception]): ResultNel[A] =
      Invalid(es.map(FailureException(_)))
    // implicits are necessary here to disambiguate arguments after erasure
    def apply[A](
        es: NonEmptyList[(Exception, String)]
    )(implicit i: DummyImplicit): ResultNel[A] =
      Invalid(es.map { case (e, id) => FailureException(e, Some(id)) })
    def apply[A](
        messages: NonEmptyList[String]
    )(implicit i: DummyImplicit, i2: DummyImplicit): ResultNel[A] =
      Invalid(messages.map(FailureMessage(_)))
    def apply[A](messages: NonEmptyList[(String, String)])(
        implicit i: DummyImplicit,
        i2: DummyImplicit,
        i3: DummyImplicit
    ): ResultNel[A] =
      Invalid(messages.map { case (m, id) => FailureMessage(m, Some(id)) })
    def unapply(result: Result[_]): Option[(Exception, String)] =
      result.swap.toOption.map { f =>
        (f.e, f.id)
      }
  }
} 
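A small sketch of attempt, which converts a thrown non-fatal exception into a Failure value:

import org.adridadou.openlaw.result._

val ok: Result[Int] = attempt("42".toInt)   // Success(42)
val ko: Result[Int] = attempt("nope".toInt) // Failure wrapping the NumberFormatException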
Example 134
Source File: package.scala    From case-classy   with Apache License 2.0
package classy

import predef._

import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

import scala.util.control.NonFatal


    def load(loader: ClassLoader): Either[DecodeError, A] =
      decoder(ConfigFactory.load(loader))
    //#-typesafe

  }

  import ConfigDecoders.std._
  implicit val classyConfigReadConfig         = Read.instance(config)
  implicit val classyConfigReadString         = Read.instance(string)
  implicit val classyConfigReadNumber         = Read.instance(number)      //#=typesafe
  implicit val classyConfigReadBoolean        = Read.instance(boolean)
  implicit val classyConfigReadFiniteDuration = Read.instance(finiteDuration)
  implicit val classyConfigReadInt            = Read.instance(int)
  implicit val classyConfigReadLong           = Read.instance(long)
  implicit val classyConfigReadDouble         = Read.instance(double)

  implicit val classyConfigReadConfigList     = Read.instance(configList)
  implicit val classyConfigReadStringList     = Read.instance(stringList)
  implicit val classyConfigReadNumberList     = Read.instance(numberList)  //#=typesafe
  implicit val classyConfigReadBooleanList    = Read.instance(booleanList)
  implicit val classyConfigReadIntList        = Read.instance(intList)
  implicit val classyConfigReadLongList       = Read.instance(longList)
  implicit val classyConfigReadDoubleList     = Read.instance(doubleList)
} 
Example 135
Source File: ExpiryServiceTest.scala    From kafka-journal   with MIT License
package com.evolutiongaming.kafka.journal.eventual.cassandra

import java.time.{Instant, LocalDate, ZoneOffset}

import cats.effect.ExitCase
import cats.implicits._
import cats.{Id, catsInstancesForId}
import com.evolutiongaming.kafka.journal.ExpireAfter
import com.evolutiongaming.kafka.journal.ExpireAfter.implicits._
import com.evolutiongaming.kafka.journal.eventual.cassandra.ExpireOn.implicits._
import com.evolutiongaming.kafka.journal.eventual.cassandra.ExpiryService.Action
import com.evolutiongaming.kafka.journal.util.BracketFromMonad
import org.scalatest.FunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.util.control.NonFatal

class ExpiryServiceTest extends FunSuite with Matchers {
  import ExpiryServiceTest._

  test("expireOn") {
    val expireAfter = 1.day.toExpireAfter
    val expected = LocalDate.of(2019, 12, 12).toExpireOn
    expireService.expireOn(expireAfter, timestamp) shouldEqual expected
  }

  for {
    (expiry, expireAfter, action) <- List(
      (
        none[Expiry],
        1.minute.toExpireAfter.some,
        Action.update(Expiry(
          1.minute.toExpireAfter,
          LocalDate.of(2019, 12, 11).toExpireOn))),
      (
        none[Expiry],
        1.day.toExpireAfter.some,
        Action.update(Expiry(
          1.day.toExpireAfter,
          LocalDate.of(2019, 12, 12).toExpireOn))),
      (
        Expiry(
          1.day.toExpireAfter,
          LocalDate.of(2019, 12, 11).toExpireOn).some,
        1.day.toExpireAfter.some,
        Action.update(Expiry(
          1.day.toExpireAfter,
          LocalDate.of(2019, 12, 12).toExpireOn))),
      (
        Expiry(
          1.day.toExpireAfter,
          LocalDate.of(2019, 12, 12).toExpireOn).some,
        1.day.toExpireAfter.some,
        Action.ignore),
      (
        Expiry(
          1.day.toExpireAfter,
          LocalDate.of(2019, 12, 12).toExpireOn).some,
        none[ExpireAfter],
        Action.remove))
  } yield {
    test(s"action expiry: $expiry, expireAfter: $expireAfter, action: $action") {
      expireService.action(expiry, expireAfter, timestamp) shouldEqual action
    }
  }
}

object ExpiryServiceTest {

  implicit val bracketId: BracketFromMonad[Id, Throwable] = new BracketFromMonad[Id, Throwable] {

    def F = catsInstancesForId

    def bracketCase[A, B](acquire: Id[A])(use: A => Id[B])(release: (A, ExitCase[Throwable]) => Id[Unit]) = {
      flatMap(acquire) { a =>
        try {
          val b = use(a)
          try release(a, ExitCase.Completed) catch { case NonFatal(_) => }
          b
        } catch {
          case NonFatal(e) =>
            release(a, ExitCase.Error(e))
            raiseError(e)
        }
      }
    }

    def raiseError[A](a: Throwable) = throw a

    def handleErrorWith[A](fa: Id[A])(f: Throwable => Id[A]) = fa
  }

  val timestamp: Instant = Instant.parse("2019-12-11T10:10:10.00Z")

  val zoneId: ZoneOffset = ZoneOffset.UTC

  val expireService: ExpiryService[Id] = ExpiryService[Id](zoneId)
} 
Example 136
Source File: TestSync.scala    From kafka-journal   with MIT License
package com.evolutiongaming.kafka.journal.util

import cats.Monad
import cats.effect.{ExitCase, Sync}

import scala.util.control.NonFatal

object TestSync {

  def apply[F[_]](implicit F: Monad[F]): Sync[F] = new Sync[F] {

    def suspend[A](thunk: => F[A]) = thunk

    def bracketCase[A, B](acquire: F[A])(use: A => F[B])(release: (A, ExitCase[Throwable]) => F[Unit]) = {
      flatMap(acquire) { a =>
        try {
          val b = use(a)
          try release(a, ExitCase.Completed) catch { case NonFatal(_) => }
          b
        } catch {
          case NonFatal(e) =>
            release(a, ExitCase.Error(e))
            raiseError(e)
        }
      }
    }

    def raiseError[A](e: Throwable) = throw e

    def handleErrorWith[A](fa: F[A])(f: Throwable => F[A]) = try fa catch { case NonFatal(e) => f(e) }

    def flatMap[A, B](fa: F[A])(f: A => F[B]) = F.flatMap(fa)(f)

    def tailRecM[A, B](a: A)(f: A => F[Either[A, B]]) = F.tailRecM(a)(f)

    def pure[A](a: A) = F.pure(a)
  }
} 
Example 137
Source File: Retry.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.javadsl.persistence.jpa

import java.util.concurrent.CompletionStage
import java.util.function.Supplier

import akka.actor.Scheduler
import akka.pattern.after

import scala.concurrent.duration.Duration.fromNanos
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.control.NonFatal

// With thanks to https://gist.github.com/viktorklang/9414163
private[lagom] class Retry(delay: FiniteDuration, delayFactor: Double, maxRetries: Int) {
  def apply[T](op: => T)(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    def iterate(nextDelay: FiniteDuration, remainingRetries: Int): Future[T] =
      Future(op).recoverWith {
        case NonFatal(throwable) if remainingRetries > 0 => {
          onRetry(throwable, nextDelay, remainingRetries)
          after(nextDelay, s)(iterate(finiteMultiply(nextDelay, delayFactor), remainingRetries - 1))
        }
      }

    iterate(delay, maxRetries)
  }

  // For convenient use from Java 8
  def retry[T](op: Supplier[T])(implicit ec: ExecutionContext, s: Scheduler): CompletionStage[T] = {
    import scala.compat.java8.FutureConverters._

    apply(op.get()).toJava
  }

  protected def onRetry(throwable: Throwable, delay: FiniteDuration, remainingRetries: Int): Unit = ()

  private def finiteMultiply(duration: FiniteDuration, factor: Double): FiniteDuration =
    fromNanos((duration.toNanos * factor).toLong)
}

private[lagom] object Retry {
  def apply[T](delay: FiniteDuration, delayFactor: Double, maxRetries: Int)(
      op: => T
  )(implicit ec: ExecutionContext, s: Scheduler): Future[T] =
    (new Retry(delay, delayFactor, maxRetries))(op)
} 
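A sketch of retrying a flaky operation; since the class is private[lagom], this is only usable from lagom-internal code, and the operation below is illustrative:

import scala.concurrent.duration._

// assuming an ActorSystem is in scope:
// implicit val scheduler: Scheduler = system.scheduler
// implicit val ec: ExecutionContext = system.dispatcher
// val result: Future[Int] = Retry(250.millis, delayFactor = 2.0, maxRetries = 5) {
//   flakyCall() // delays grow as 250ms, 500ms, 1s, ... for up to 5 retries
// }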
Example 138
Source File: TopicFactory.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.javadsl.api.broker

import javax.inject.Inject
import javax.inject.Singleton

import com.lightbend.lagom.javadsl.api.Descriptor.TopicCall
import com.lightbend.lagom.javadsl.api.broker.Topic
import play.api.inject.Injector

import scala.util.control.NonFatal


trait TopicFactory {
  def create[Message](topicCall: TopicCall[Message]): Topic[Message]
}

trait TopicFactoryProvider {
  def get: Option[TopicFactory]
}

@Singleton
class InjectorTopicFactoryProvider @Inject() (injector: Injector) extends TopicFactoryProvider {
  override lazy val get: Option[TopicFactory] =
    try {
      Some(injector.instanceOf[TopicFactory])
    } catch {
      case NonFatal(e) => None
    }
}

object NoTopicFactoryProvider extends TopicFactoryProvider {
  override val get = None
} 
Example 139
Source File: MultiNodeExpect.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.cluster

import akka.Done
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Scheduler
import akka.annotation.ApiMayChange
import akka.cluster.ddata.DistributedData
import akka.cluster.ddata.Flag
import akka.cluster.ddata.FlagKey
import akka.cluster.ddata.Replicator.Get
import akka.cluster.ddata.Replicator.GetSuccess
import akka.cluster.ddata.Replicator.ReadLocal
import akka.cluster.ddata.Replicator.Update
import akka.cluster.ddata.Replicator.UpdateSuccess
import akka.cluster.ddata.Replicator.WriteAll
import akka.cluster.ddata.Replicator.WriteConsistency
import akka.cluster.ddata.SelfUniqueAddress
import akka.event.LoggingAdapter
import akka.testkit.TestProbe

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect.ClassTag
import akka.pattern.after
import akka.pattern.ask
import akka.util.Timeout

import scala.util.control.NonFatal

@ApiMayChange
class MultiNodeExpect(probe: TestProbe)(implicit system: ActorSystem) {
  private implicit val scheduler: Scheduler               = system.scheduler
  private implicit val executionContext: ExecutionContext = system.dispatcher

  val replicator: ActorRef             = DistributedData(system).replicator
  implicit val node: SelfUniqueAddress = DistributedData(system).selfUniqueAddress

  
  def expectMsgType[T](expectationKey: String, max: FiniteDuration)(implicit t: ClassTag[T]): Future[Done] = {
    val eventualT = () => Future(errorAsRuntime(probe.expectMsgType[T](max)))
    doExpect(eventualT)(expectationKey, max)
  }

  // prevents Errors from turning into BoxedError when using `Future(f)` (where f throws Error)
  private def errorAsRuntime[T](f: => T): T = {
    try {
      f
    } catch {
      case NonFatal(t)  => throw t
      case x: Throwable => throw new RuntimeException(x)
    }
  }

  private def doExpect[T](eventualT: () => Future[T])(expectationKey: String, max: FiniteDuration): Future[Done] = {
    val DataKey: FlagKey           = FlagKey(expectationKey)
    val writeAll: WriteConsistency = WriteAll(max)
    implicit val timeout: Timeout  = Timeout(max)

    val retryDelay = 3.second

    val fTimeout = after(max, scheduler)(Future.failed(new RuntimeException(s"timeout $max expired")))

    // If the local expectation wins, it must notify others.
    val fLocalExpect: Future[Done] = eventualT()
      .map { _ =>
        (replicator ? Update(DataKey, Flag.empty, writeAll)(
          _.switchOn
        )).mapTo[UpdateSuccess[Flag]]
      }
      .map(_ => Done)

    // if a remote expectation wins, we can move on.
    val poll: () => Future[Done] = () =>
      (replicator ? Get(DataKey, ReadLocal)).map {
        case g @ GetSuccess(DataKey, _) if g.get(DataKey).enabled => Done
        case _                                                    => throw new RuntimeException("Flag unset")
      }
    val fRemoteExpect: Future[Done] = retry(
      poll,
      retryDelay,
      Int.MaxValue // keep retrying, there's a timeout later
    )

    Future
      .firstCompletedOf(
        Seq(
          fLocalExpect,
          fRemoteExpect,
          fTimeout
        )
      )
  }

  // From vklang's https://gist.github.com/viktorklang/9414163
  def retry[T](op: () => Future[T], delay: FiniteDuration, retries: Int): Future[T] =
    op().recoverWith { case _ if retries > 0 => after(delay, scheduler)(retry(op, delay, retries - 1)) }
} 
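A usage sketch inside a multi-JVM test: every node runs the same expectation, and the replicated flag lets whichever node sees the message first unblock the rest. Names below are illustrative:

// implicit val system: ActorSystem = ...
// val probe = TestProbe()
// system.eventStream.subscribe(probe.ref, classOf[String])
// val done: Future[Done] =
//   new MultiNodeExpect(probe).expectMsgType[String]("saw-greeting", 10.seconds)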
Example 140
Source File: JsonUtils.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.sql.kafka010

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


private object JsonUtils {

  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    implicit val ordering = new Ordering[TopicPartition] {
      override def compare(x: TopicPartition, y: TopicPartition): Int = {
        Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition))
      }
    }
    val partitions = partitionOffsets.keySet.toSeq.sorted  // sort for more determinism
    partitions.foreach { tp =>
        val off = partitionOffsets(tp)
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
Example 141
Source File: CreateHiveTableAsSelectCommand.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.sql.hive.execution

import scala.util.control.NonFatal

import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan, OverwriteOptions}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.hive.MetastoreRelation



case class CreateHiveTableAsSelectCommand(
    tableDesc: CatalogTable,
    query: LogicalPlan,
    ignoreIfExists: Boolean)
  extends RunnableCommand {

  private val tableIdentifier = tableDesc.identifier

  override def innerChildren: Seq[LogicalPlan] = Seq(query)

  override def run(sparkSession: SparkSession): Seq[Row] = {
    lazy val metastoreRelation: MetastoreRelation = {
      import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
      import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
      import org.apache.hadoop.io.Text
      import org.apache.hadoop.mapred.TextInputFormat

      val withFormat =
        tableDesc.withNewStorage(
          inputFormat =
            tableDesc.storage.inputFormat.orElse(Some(classOf[TextInputFormat].getName)),
          outputFormat =
            tableDesc.storage.outputFormat
              .orElse(Some(classOf[HiveIgnoreKeyTextOutputFormat[Text, Text]].getName)),
          serde = tableDesc.storage.serde.orElse(Some(classOf[LazySimpleSerDe].getName)),
          compressed = tableDesc.storage.compressed)

      val withSchema = if (withFormat.schema.isEmpty) {
        // Hive doesn't support specifying the column list for target table in CTAS
        // However we don't think SparkSQL should follow that.
        tableDesc.copy(schema = query.output.toStructType)
      } else {
        withFormat
      }

      sparkSession.sessionState.catalog.createTable(withSchema, ignoreIfExists = false)

      // Get the Metastore Relation
      sparkSession.sessionState.catalog.lookupRelation(tableIdentifier) match {
        case r: MetastoreRelation => r
      }
    }
    // TODO ideally, we should get the output data ready first and then
    // add the relation into catalog, just in case of failure occurs while data
    // processing.
    if (sparkSession.sessionState.catalog.tableExists(tableIdentifier)) {
      if (ignoreIfExists) {
        // table already exists, will do nothing, to keep consistent with Hive
      } else {
        throw new AnalysisException(s"$tableIdentifier already exists.")
      }
    } else {
      try {
        sparkSession.sessionState.executePlan(InsertIntoTable(
          metastoreRelation, Map(), query, overwrite = OverwriteOptions(true),
          ifNotExists = false)).toRdd
      } catch {
        case NonFatal(e) =>
          // drop the created table.
          sparkSession.sessionState.catalog.dropTable(tableIdentifier, ignoreIfNotExists = true,
            purge = false)
          throw e
      }
    }

    Seq.empty[Row]
  }

  override def argString: String = {
    s"[Database:${tableDesc.database}}, " +
    s"TableName: ${tableDesc.identifier.table}, " +
    s"InsertIntoHiveTable]"
  }
} 
Example 142
Source File: SQLBuilderTest.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.sql.catalyst

import scala.util.control.NonFatal

import org.apache.spark.sql.{DataFrame, Dataset, QueryTest}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.hive.test.TestHiveSingleton


abstract class SQLBuilderTest extends QueryTest with TestHiveSingleton {
  protected def checkSQL(e: Expression, expectedSQL: String): Unit = {
    val actualSQL = e.sql
    try {
      assert(actualSQL === expectedSQL)
    } catch {
      case cause: Throwable =>
        fail(
          s"""Wrong SQL generated for the following expression:
             |
             |${e.prettyName}
             |
             |$cause
           """.stripMargin)
    }
  }

  protected def checkSQL(plan: LogicalPlan, expectedSQL: String): Unit = {
    val generatedSQL = try new SQLBuilder(plan).toSQL catch { case NonFatal(e) =>
      fail(
        s"""Cannot convert the following logical query plan to SQL:
           |
           |${plan.treeString}
         """.stripMargin)
    }

    try {
      assert(generatedSQL === expectedSQL)
    } catch {
      case cause: Throwable =>
        fail(
          s"""Wrong SQL generated for the following logical query plan:
             |
             |${plan.treeString}
             |
             |$cause
           """.stripMargin)
    }

    checkAnswer(spark.sql(generatedSQL), Dataset.ofRows(spark, plan))
  }

  protected def checkSQL(df: DataFrame, expectedSQL: String): Unit = {
    checkSQL(df.queryExecution.analyzed, expectedSQL)
  }
} 
Example 143
Source File: ResolveInlineTables.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.sql.catalyst.analysis

import scala.util.control.NonFatal

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Cast
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types.{StructField, StructType}


object ResolveInlineTables extends Rule[LogicalPlan] {
  // Note: the rule's `apply` method and its input-validation helpers are
  // elided in this excerpt; only the conversion logic is shown.

  private[analysis] def convert(table: UnresolvedInlineTable): LocalRelation = {
    // For each column, traverse all the values and find a common data type and nullability.
    val fields = table.rows.transpose.zip(table.names).map { case (column, name) =>
      val inputTypes = column.map(_.dataType)
      val tpe = TypeCoercion.findWiderTypeWithoutStringPromotion(inputTypes).getOrElse {
        table.failAnalysis(s"incompatible types found in column $name for inline table")
      }
      StructField(name, tpe, nullable = column.exists(_.nullable))
    }
    val attributes = StructType(fields).toAttributes
    assert(fields.size == table.names.size)

    val newRows: Seq[InternalRow] = table.rows.map { row =>
      InternalRow.fromSeq(row.zipWithIndex.map { case (e, ci) =>
        val targetType = fields(ci).dataType
        try {
          if (e.dataType.sameType(targetType)) {
            e.eval()
          } else {
            Cast(e, targetType).eval()
          }
        } catch {
          case NonFatal(ex) =>
            table.failAnalysis(s"failed to evaluate expression ${e.sql}: ${ex.getMessage}")
        }
      })
    }

    LocalRelation(attributes, newRows)
  }
} 
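For context, this rule is what resolves SQL inline tables. A query like the following exercises the type-widening logic in convert (a SparkSession named spark is assumed):

// `id` mixes 1 (INT) and 2L (BIGINT), so the column is widened to BIGINT;
// incompatible column types would hit failAnalysis instead.
spark.sql("SELECT * FROM VALUES (1, 'a'), (2L, 'b') AS data(id, name)").show()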
Example 144
Source File: StreamMetadata.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, FSDataOutputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.streaming.StreamingQuery


object StreamMetadata extends Logging {
  // Note: the StreamMetadata case class and its `read` counterpart are elided
  // in this excerpt; Serialization.write below requires an implicit Formats.
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: FSDataOutputStream = null
    try {
      val fs = FileSystem.get(hadoopConf)
      output = fs.create(metadataFile)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case NonFatal(e) =>
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    } finally {
      IOUtils.closeQuietly(output)
    }
  }
} 
Example 145
Source File: HBaseCredentialProvider.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.deploy.yarn.security

import scala.reflect.runtime.universe
import scala.util.control.NonFatal

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.Credentials
import org.apache.hadoop.security.token.{Token, TokenIdentifier}

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[security] class HBaseCredentialProvider extends ServiceCredentialProvider with Logging {

  override def serviceName: String = "hbase"

  override def obtainCredentials(
      hadoopConf: Configuration,
      sparkConf: SparkConf,
      creds: Credentials): Option[Long] = {
    try {
      val mirror = universe.runtimeMirror(getClass.getClassLoader)
      val obtainToken = mirror.classLoader.
        loadClass("org.apache.hadoop.hbase.security.token.TokenUtil").
        getMethod("obtainToken", classOf[Configuration])

      logDebug("Attempting to fetch HBase security token.")
      val token = obtainToken.invoke(null, hbaseConf(hadoopConf))
        .asInstanceOf[Token[_ <: TokenIdentifier]]
      logInfo(s"Get token from HBase: ${token.toString}")
      creds.addToken(token.getService, token)
    } catch {
      case NonFatal(e) =>
        logDebug(s"Failed to get token from service $serviceName", e)
    }

    None
  }

  override def credentialsRequired(hadoopConf: Configuration): Boolean = {
    hbaseConf(hadoopConf).get("hbase.security.authentication") == "kerberos"
  }

  private def hbaseConf(conf: Configuration): Configuration = {
    try {
      val mirror = universe.runtimeMirror(getClass.getClassLoader)
      val confCreate = mirror.classLoader.
        loadClass("org.apache.hadoop.hbase.HBaseConfiguration").
        getMethod("create", classOf[Configuration])
      confCreate.invoke(null, conf).asInstanceOf[Configuration]
    } catch {
      case NonFatal(e) =>
        logDebug("Fail to invoke HBaseConfiguration", e)
        conf
    }
  }
} 
Example 146
Source File: SocketInputDStream.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.streaming.dstream

import java.io._
import java.net.{ConnectException, Socket}
import java.nio.charset.StandardCharsets

import scala.reflect.ClassTag
import scala.util.control.NonFatal

import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator

private[streaming]
class SocketInputDStream[T: ClassTag](
    _ssc: StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](_ssc) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  private var socket: Socket = _

  def onStart() {

    logInfo(s"Connecting to $host:$port")
    try {
      socket = new Socket(host, port)
    } catch {
      case e: ConnectException =>
        restart(s"Error connecting to $host:$port", e)
        return
    }
    logInfo(s"Connected to $host:$port")

    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // in case restart thread close it twice
    synchronized {
      if (socket != null) {
        socket.close()
        socket = null
        logInfo(s"Closed socket to $host:$port")
      }
    }
  }

  
  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(
      new InputStreamReader(inputStream, StandardCharsets.UTF_8))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
} 
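This receiver is what backs the standard socketTextStream API; a minimal wiring sketch (host and port are placeholders):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

val conf = new SparkConf().setAppName("socket-example").setMaster("local[2]")
val ssc = new StreamingContext(conf, Seconds(1))
// Internally creates a SocketInputDStream with bytesToLines as the decoder.
val lines = ssc.socketTextStream("localhost", 9999)
lines.print()
ssc.start()
ssc.awaitTermination()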
Example 147
Source File: EventLogDownloadResource.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 148
Source File: WatchServiceObservable.scala    From monix-nio   with Apache License 2.0
package monix.nio

import java.nio.file.WatchEvent

import monix.eval.Task
import monix.execution.Ack.{ Continue, Stop }
import monix.execution.atomic.Atomic
import monix.execution.cancelables.SingleAssignCancelable
import monix.execution.exceptions.APIContractViolationException
import monix.execution.{ Callback, Cancelable, Scheduler }
import monix.reactive.Observable
import monix.reactive.observers.Subscriber

import scala.concurrent.Future
import scala.util.control.NonFatal

abstract class WatchServiceObservable extends Observable[Array[WatchEvent[_]]] {
  def watchService: Option[WatchService]

  private[this] val wasSubscribed = Atomic(false)
  override def unsafeSubscribeFn(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = {
    if (wasSubscribed.getAndSet(true)) {
      subscriber.onError(APIContractViolationException(this.getClass.getName))
      Cancelable.empty
    } else try startPolling(subscriber) catch {
      case NonFatal(e) =>
        subscriber.onError(e)
        Cancelable.empty
    }
  }

  def init(subscriber: Subscriber[Array[WatchEvent[_]]]): Future[Unit] =
    Future.successful(())

  private def startPolling(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = {
    import subscriber.scheduler

    val taskCallback = new Callback[Throwable, Array[WatchEvent[_]]]() {
      override def onSuccess(value: Array[WatchEvent[_]]): Unit = {}
      override def onError(ex: Throwable): Unit = {
        subscriber.onError(ex)
      }
    }
    val cancelable = Task
      .fromFuture(init(subscriber))
      .flatMap { _ =>
        loop(subscriber)
      }
      .executeWithOptions(_.enableAutoCancelableRunLoops)
      .runAsync(taskCallback)

    val extraCancelable = Cancelable(() => {
      cancelable.cancel()
    })
    SingleAssignCancelable.plusOne(extraCancelable)
  }

  private def loop(subscriber: Subscriber[Array[WatchEvent[_]]])(implicit scheduler: Scheduler): Task[Array[WatchEvent[_]]] = {
    import collection.JavaConverters._
    watchService.map { ws =>
      ws.take()
        .doOnCancel(Task.defer(ws.close()))
        .flatMap { key =>
          val events = key.pollEvents().asScala.toArray
          key.reset()
          Task.fromFuture(subscriber.onNext(events)).flatMap {
            case Continue => loop(subscriber)
            case Stop => emptyTask
          }
        }
    }
  }.getOrElse(emptyTask)

  private val emptyTask = Task.create[Array[WatchEvent[_]]]((_, _) => Cancelable.empty)
} 
Example 149
Source File: AsyncChannelConsumer.scala    From monix-nio   with Apache License 2.0
package monix.nio

import java.nio.ByteBuffer

import monix.execution.Ack.{ Continue, Stop }
import monix.execution.{ Ack, Callback, Cancelable, Scheduler }
import monix.execution.atomic.Atomic
import monix.execution.cancelables.{ AssignableCancelable, SingleAssignCancelable }
import monix.reactive.Consumer
import monix.reactive.observers.Subscriber

import scala.concurrent.{ Future, Promise }
import scala.util.control.NonFatal

private[nio] abstract class AsyncChannelConsumer extends Consumer[Array[Byte], Long] {
  def channel: Option[AsyncChannel]
  def withInitialPosition: Long = 0L
  def init(subscriber: AsyncChannelSubscriber): Future[Unit] = Future.successful(())

  class AsyncChannelSubscriber(consumerCallback: Callback[Throwable, Long])(implicit val scheduler: Scheduler)
    extends Subscriber[Array[Byte]] { self =>

    private[this] lazy val initFuture = init(self)
    private[this] val callbackCalled = Atomic(false)
    private[this] var position = withInitialPosition

    override def onNext(elem: Array[Byte]): Future[Ack] = {
      def write(): Future[Ack] = {
        val promise = Promise[Ack]()
        channel.foreach { sc =>
          try {
            sc
              .write(ByteBuffer.wrap(elem), position)
              .runAsync(
                new Callback[Throwable, Int] {
                  override def onError(exc: Throwable) = {
                    closeChannel()
                    sendError(exc)
                    promise.success(Stop)
                  }

                  override def onSuccess(result: Int): Unit = {
                    position += result
                    promise.success(Continue)
                  }
                })
          } catch {
            case NonFatal(ex) =>
              sendError(ex)
              promise.success(Stop)
          }
        }

        promise.future
      }

      if (initFuture.value.isEmpty) {
        initFuture.flatMap(_ => write())
      } else {
        write()
      }
    }

    override def onComplete(): Unit = {
      channel.collect { case sc if sc.closeOnComplete => closeChannel() }
      if (callbackCalled.compareAndSet(expect = false, update = true))
        consumerCallback.onSuccess(position)
    }

    override def onError(ex: Throwable): Unit = {
      closeChannel()
      sendError(ex)
    }

    private[nio] def onCancel(): Unit = {
      callbackCalled.set(true) 
      closeChannel()
    }

    private[nio] def sendError(t: Throwable) =
      if (callbackCalled.compareAndSet(expect = false, update = true)) {
        scheduler.execute(new Runnable {
          def run() = consumerCallback.onError(t)
        })
      }

    private[nio] final def closeChannel()(implicit scheduler: Scheduler) =
      channel.foreach(_.close().runToFuture)
  }

  override def createSubscriber(cb: Callback[Throwable, Long], s: Scheduler): (Subscriber[Array[Byte]], AssignableCancelable) = {
    val out = new AsyncChannelSubscriber(cb)(s)

    val extraCancelable = Cancelable(() => out.onCancel())
    val conn = SingleAssignCancelable.plusOne(extraCancelable)
    (out, conn)
  }
} 
Example 150
Source File: AsyncChannelObservable.scala    From monix-nio   with Apache License 2.0
package monix.nio

import java.nio.ByteBuffer

import monix.eval.Task
import monix.execution.Ack.{ Continue, Stop }
import monix.execution.{ Callback, Cancelable, Scheduler }
import monix.execution.atomic.Atomic
import monix.execution.cancelables.SingleAssignCancelable
import monix.execution.exceptions.APIContractViolationException
import monix.nio.internal.{ Bytes, EmptyBytes, NonEmptyBytes }
import monix.reactive.Observable
import monix.reactive.observers.Subscriber

import scala.concurrent.Future
import scala.util.control.NonFatal

private[nio] abstract class AsyncChannelObservable extends Observable[Array[Byte]] {
  def bufferSize: Int
  def channel: Option[AsyncChannel]
  def init(subscriber: Subscriber[Array[Byte]]): Future[Unit] =
    Future.successful(())

  private[this] val wasSubscribed = Atomic(false)
  override def unsafeSubscribeFn(subscriber: Subscriber[Array[Byte]]): Cancelable = {
    import subscriber.scheduler
    if (wasSubscribed.getAndSet(true)) {
      subscriber.onError(APIContractViolationException(this.getClass.getName))
      Cancelable.empty
    } else try startReading(subscriber) catch {
      case NonFatal(e) =>
        subscriber.onError(e)
        closeChannel()
        Cancelable.empty
    }
  }

  private def startReading(subscriber: Subscriber[Array[Byte]]): Cancelable = {
    import subscriber.scheduler

    val taskCallback = new Callback[Throwable, Array[Byte]]() {
      override def onSuccess(value: Array[Byte]): Unit = {
        channel.collect { case sc if sc.closeOnComplete => closeChannel() }
      }
      override def onError(ex: Throwable): Unit = {
        closeChannel()
        subscriber.onError(ex)
      }
    }
    val cancelable = Task
      .fromFuture(init(subscriber))
      .flatMap { _ =>
        loop(subscriber, 0)
      }
      .executeWithOptions(_.enableAutoCancelableRunLoops)
      .runAsync(taskCallback)

    val extraCancelable = Cancelable(() => {
      cancelable.cancel()
      closeChannel()
    })
    SingleAssignCancelable.plusOne(extraCancelable)
  }

  private[this] val buffer = ByteBuffer.allocate(bufferSize)
  private def loop(subscriber: Subscriber[Array[Byte]], position: Long)(implicit scheduler: Scheduler): Task[Array[Byte]] = {
    buffer.clear()
    channel.map { ch =>
      ch
        .read(buffer, position)
        .doOnCancel(Task.defer(ch.close()))
        .flatMap { result =>
          val bytes = Bytes(buffer, result)
          bytes match {
            case EmptyBytes =>
              subscriber.onComplete()
              Task.now(Bytes.emptyBytes)

            case NonEmptyBytes(arr) =>
              Task.fromFuture(subscriber.onNext(arr)).flatMap {
                case Continue =>
                  loop(subscriber, position + result)

                case Stop =>
                  Task.now(Bytes.emptyBytes)
              }
          }
        }
    }.getOrElse(Task.now(Bytes.emptyBytes))
  }

  private[nio] final def closeChannel()(implicit scheduler: Scheduler) =
    channel.foreach(_.close().runToFuture)
} 
Example 151
Source File: WatchService.scala    From monix-nio   with Apache License 2.0
package monix.nio.file

import java.nio.file.StandardWatchEventKinds.{ ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY }
import java.nio.file.WatchEvent.Kind
import java.nio.file.{ Path, WatchEvent, WatchKey }

import com.sun.nio.file.SensitivityWatchEventModifier
import monix.execution.{ Callback, Cancelable, Scheduler }

import scala.concurrent.{ Future, Promise }
import scala.concurrent.duration.TimeUnit
import scala.util.control.NonFatal


abstract class WatchService extends AutoCloseable {
  def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit

  def poll(timeout: Long, timeUnit: TimeUnit): Future[Option[WatchKey]] = {
    val p = Promise[Option[WatchKey]]()
    poll(timeout, timeUnit, Callback.fromPromise(p))
    p.future
  }

  def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit

  def poll(): Future[Option[WatchKey]] = {
    val p = Promise[Option[WatchKey]]()
    poll(Callback.fromPromise(p))
    p.future
  }

  def take(cb: Callback[Throwable, WatchKey]): Unit

  def take(): Future[WatchKey] = {
    val p = Promise[WatchKey]()
    take(Callback.fromPromise(p))
    p.future
  }
}

object WatchService {
  val SupportedEvents: Set[Kind[_]] = Set(ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY)

  def apply(path: Path, events: Kind[_]*)(implicit scheduler: Scheduler): WatchService = {
    val watcher = path.getFileSystem.newWatchService()
    val watchFor = if (events.isEmpty) SupportedEvents else events

    path.register(
      watcher,
      watchFor.toArray,
      SensitivityWatchEventModifier.HIGH.asInstanceOf[WatchEvent.Modifier])

    new NIOWatcherServiceImplementation(watcher)
  }

  private final class NIOWatcherServiceImplementation(watcher: java.nio.file.WatchService)(implicit scheduler: Scheduler) extends WatchService {
    override def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit = {
      try {
        val key = Option(watcher.poll(timeout, timeUnit))
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) =>
          cb.onError(ex)
      }
    }

    override def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit = {
      try {
        val key = Option(watcher.poll())
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) =>
          cb.onError(ex)
      }
    }

    override def take(cb: Callback[Throwable, WatchKey]): Unit = {
      try {
        val key = watcher.take()
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) =>
          cb.onError(ex)
      }
    }

    override def close(): Unit = cancelable.cancel()

    private[this] val cancelable: Cancelable =
      Cancelable { () =>
        try watcher.close() catch {
          case NonFatal(ex) => scheduler.reportFailure(ex)
        }
      }
  }
} 
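A minimal usage sketch of the factory above (the watched directory is a placeholder):

import java.nio.file.Paths
import monix.execution.Scheduler.Implicits.global
import monix.nio.file.WatchService
import scala.concurrent.Await
import scala.concurrent.duration._

val watcher = WatchService(Paths.get("/tmp/watched"))
// take() completes once the next batch of file-system events arrives.
val key = Await.result(watcher.take(), 30.seconds)
key.pollEvents().forEach(e => println(s"${e.kind()}: ${e.context()}"))
watcher.close()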
Example 152
Source File: IntegrationTest.scala    From monix-nio   with Apache License 2.0
package monix.nio.file

import java.nio.file.{ Files, Paths, StandardOpenOption }
import java.util

import minitest.SimpleTestSuite
import monix.execution.Callback
import monix.nio.file

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }
import scala.util.control.NonFatal

object IntegrationTest extends SimpleTestSuite {
  test("same file generated") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/out.txt")
    val consumer = file.writeAsync(to)
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean
    assert(util.Arrays.equals(f1, f2))
  }

  test("add data to existing file") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/existing.txt")
    val strSeq = Seq("A", "\u0024", "\u00A2", "\u20AC", new String(Array(0xF0, 0x90, 0x8D, 0x88).map(_.toByte)), "B")

    try {
      Files.write(to, strSeq.flatMap(_.getBytes).toArray, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.APPEND)
    } catch {
      case NonFatal(e) => fail(s"got error: $e")
    }
    val consumer = file.appendAsync(to, Files.size(to))
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean

    val all1: Seq[Byte] = strSeq.flatMap(_.getBytes) ++ f1.toSeq
    assertEquals(all1, f2.toSeq)
  }
} 
Example 153
Source File: ProcessBot.scala    From telegram   with Apache License 2.0
import cats.instances.future._
import cats.syntax.functor._
import com.bot4s.telegram.api.declarative.Commands
import com.bot4s.telegram.future.Polling

import scala.concurrent.Future
import scala.util.control.NonFatal


class ProcessBot(token: String) extends ExampleBot(token)
  with Polling
  with Commands[Future] {

  onCommand('run | 'exec | 'execute | 'cmd) { implicit msg =>
    withArgs {
      args =>
        try {
          import sys.process._
          val result = args.mkString(" ") !!

          reply(result).void
        } catch {
          case NonFatal(e) =>
            reply("Exception: " + e.getMessage).void
        }
    }
  }
} 
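A launch sketch, assuming the token is supplied via an environment variable (ExampleBot's base class is defined elsewhere in the repository; the variable name is an assumption):

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object ProcessBotApp extends App {
  val bot = new ProcessBot(sys.env("BOT_TOKEN")) // token source is an assumption
  Await.result(bot.run(), Duration.Inf)          // run() starts long polling
}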
Example 154
Source File: Webhook.scala    From telegram   with Apache License 2.0
package com.bot4s.telegram.api

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.bot4s.telegram.future.BotExecutionContext
import com.bot4s.telegram.methods.SetWebhook
import com.bot4s.telegram.models.{InputFile, Update}
import slogging.StrictLogging

import scala.concurrent.Future
import scala.util.control.NonFatal


// Note: the enclosing trait declaration is restored from context; members
// such as webhookUrl, webhookRoute and allowedUpdates are elided in this
// excerpt.
trait Webhook extends WebRoutes with StrictLogging {

  def certificate: Option[InputFile] = None

  def webhookReceiver: Route = {
    entity(as[Update]) { update =>
      try {
        receiveUpdate(update, None)
      } catch {
        case NonFatal(e) =>
          logger.error("Caught exception in update handler", e)
      }
      complete(StatusCodes.OK)
    }
  }

  abstract override def routes: Route = webhookRoute ~ super.routes

  abstract override def run(): Future[Unit] = {
    request(
      SetWebhook(
        url = webhookUrl,
        certificate = certificate,
        allowedUpdates = allowedUpdates)).flatMap {
      case true => super.run() // spawn WebRoutes
      case false =>
        logger.error("Failed to set webhook")
        throw new RuntimeException("Failed to set webhook")
    }
  }
} 
Example 155
Source File: Polling.scala    From telegram   with Apache License 2.0
package com.bot4s.telegram.future

import com.bot4s.telegram.api.{Polling => BasePolling}
import com.bot4s.telegram.methods.{DeleteWebhook, GetMe}
import com.bot4s.telegram.models.{Update, User}
import slogging.StrictLogging

import scala.concurrent.Future
import scala.util.control.NonFatal

trait Polling extends BasePolling[Future] with BotExecutionContext with StrictLogging {

  private type OffsetUpdates = (Option[Long], Seq[Update], User)

  @volatile private var polling: Future[Unit] = _

  private def poll(seed: Future[OffsetUpdates]): Future[OffsetUpdates] = {
    seed.flatMap {
      case (offset, updates, user) =>

        val maxOffset = updates
          .map(_.updateId)
          .foldLeft(offset) {
            (acc, e) =>
              Some(acc.fold(e)(e max _))
          }

        // Spawn next request before processing updates.
        val f = if (polling == null) seed
        else
          poll(
            pollingGetUpdates(maxOffset.map(_ + 1)).recover {
              case NonFatal(e) =>
                logger.error("GetUpdates failed", e)
                Seq.empty[Update]
            }.map((maxOffset, _, user))
          )

        for (u <- updates) {
          try {
            receiveUpdate(u, Some(user))
          } catch {
            case NonFatal(e) =>
              // Log and swallow, exception handling should happen on receiveUpdate.
              logger.error(s"receiveUpdate failed while processing: $u", e)
          }
        }

        f
    }
  }

  private def startPolling(user: User): Future[Unit] = {
    logger.info(s"Starting (long) polling: timeout=$pollingTimeout seconds")
    polling = poll(Future.successful((None, Seq(), user))).map(_ => ())
    polling.onComplete {
      case _ => logger.info("Long polling terminated")
    }
    polling
  }

  override def run(): Future[Unit] = synchronized {
    if (polling != null) {
      throw new RuntimeException("Bot is already running")
    }
    for {
      deleted <- request(DeleteWebhook)
      if deleted
      getMe <- request(GetMe)
      p <- startPolling(getMe)
    } yield {
      p
    }
  }

  override def shutdown(): Unit = synchronized {
    if (polling == null) {
      throw new RuntimeException("Bot is not running")
    }
    super.shutdown()
    polling = null
  }
} 
Example 156
Source File: Closer.scala    From eidos   with Apache License 2.0
package org.clulab.wm.wmexchanger.utils

import scala.language.reflectiveCalls
import scala.util.control.NonFatal

object Closer {

  protected type Closeable = {def close() : Unit}

  def close[Resource <: Closeable](resource: => Resource): Unit = resource.close()

  // This is so that exceptions caused during close are caught, but don't
  // prevent the registration of any previous exception.
  // See also https://medium.com/@dkomanov/scala-try-with-resources-735baad0fd7d.
  // Others have resource: => Closeable, but I want the resource evaluated beforehand
  // so that it doesn't throw an exception before there is anything to close.
  def autoClose[Resource <: Closeable, Result](resource: Resource)(function: Resource => Result): Result = {

    val (result: Option[Result], exception: Option[Throwable]) = try {
      (Some(function(resource)), None)
    }
    catch {
      case exception: Throwable => (None, Some(exception))
    }

    val closeException: Option[Throwable] = Option(resource).flatMap { resource =>
      try {
        resource.close()
        None
      }
      catch {
        case exception: Throwable => Some(exception)
      }
    }

    (exception, closeException) match {
      case (None, None) => result.get
      case (Some(ex), None) => throw ex
      case (None, Some(ex)) => throw ex
      case (Some(ex), Some(closeEx)) => (ex, closeEx) match {
        case (e, NonFatal(nonfatal)) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (NonFatal(nonfatal), e) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (e, closeE) =>
          // On tie, put exception before closeException.
          e.addSuppressed(closeE)
          throw e
      }
    }
  }

  // Allow for alternative syntax closeable.autoClose { closeable => ... }
  implicit class AutoCloser[Resource <: Closer.Closeable](resource: Resource) {

    def autoClose[Result](function: Resource => Result): Result = Closer.autoClose(resource)(function)
  }
} 
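A usage sketch of the autoClose syntax provided by the implicit class above (the file path is a placeholder):

import java.io.FileInputStream
import org.clulab.wm.wmexchanger.utils.Closer.AutoCloser

val firstByte = new FileInputStream("data.bin").autoClose { in =>
  in.read() // the stream is closed afterwards, even if read() throws
}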
Example 157
Source File: Closer.scala    From eidos   with Apache License 2.0
package org.clulab.wm.eidos.utils

import scala.language.reflectiveCalls
import scala.util.control.NonFatal

object Closer {

  protected type Closeable = {def close() : Unit}

  def close[Resource <: Closeable](resource: => Resource): Unit = resource.close()

  // This is so that exceptions caused during close are caught, but don't
  // prevent the registration of any previous exception.
  // See also https://medium.com/@dkomanov/scala-try-with-resources-735baad0fd7d.
  // Others have resource: => Closeable, but I want the resource evaluated beforehand
  // so that it doesn't throw an exception before there is anything to close.
  def autoClose[Resource <: Closeable, Result](resource: Resource)(function: Resource => Result): Result = {

    val (result: Option[Result], exception: Option[Throwable]) = try {
      (Some(function(resource)), None)
    }
    catch {
      case exception: Throwable => (None, Some(exception))
    }

    val closeException: Option[Throwable] = Option(resource).flatMap { resource =>
      try {
        resource.close()
        None
      }
      catch {
        case exception: Throwable => Some(exception)
      }
    }

    (exception, closeException) match {
      case (None, None) => result.get
      case (Some(ex), None) => throw ex
      case (None, Some(ex)) => throw ex
      case (Some(ex), Some(closeEx)) => (ex, closeEx) match {
        case (e, NonFatal(nonfatal)) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (NonFatal(nonfatal), e) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (e, closeE) =>
          // On tie, put exception before closeException.
          e.addSuppressed(closeE)
          throw e
      }
    }
  }

  // Allow for alternative syntax closeable.autoClose { closeable => ... }
  implicit class AutoCloser[Resource <: Closer.Closeable](resource: Resource) {

    def autoClose[Result](function: Resource => Result): Result = Closer.autoClose(resource)(function)
  }
} 
Example 158
Source File: Closer.scala    From eidos   with Apache License 2.0
package org.clulab.wm.elasticsearch.utils

import scala.language.reflectiveCalls
import scala.util.control.NonFatal

object Closer {

  protected type Closeable = {def close() : Unit}

  def close[Resource <: Closeable](resource: => Resource): Unit = resource.close()

  // This is so that exceptions caused during close are caught, but don't
  // prevent the registration of any previous exception.
  // See also https://medium.com/@dkomanov/scala-try-with-resources-735baad0fd7d.
  // Others have resource: => Closeable, but I want the resource evaluated beforehand
  // so that it doesn't throw an exception before there is anything to close.
  def autoClose[Resource <: Closeable, Result](resource: Resource)(function: Resource => Result): Result = {

    val (result: Option[Result], exception: Option[Throwable]) = try {
      (Some(function(resource)), None)
    }
    catch {
      case exception: Throwable => (None, Some(exception))
    }

    val closeException: Option[Throwable] = Option(resource).flatMap { resource =>
      try {
        resource.close()
        None
      }
      catch {
        case exception: Throwable => Some(exception)
      }
    }

    (exception, closeException) match {
      case (None, None) => result.get
      case (Some(ex), None) => throw ex
      case (None, Some(ex)) => throw ex
      case (Some(ex), Some(closeEx)) => (ex, closeEx) match {
        case (e, NonFatal(nonfatal)) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (NonFatal(nonfatal), e) =>
          // Put the potentially fatal one first.
          e.addSuppressed(nonfatal)
          throw e
        case (e, closeE) =>
          // On tie, put exception before closeException.
          e.addSuppressed(closeE)
          throw e
      }
    }
  }

  // Allow for alternative syntax closeable.autoClose { closeable => ... }
  implicit class AutoCloser[Resource <: Closer.Closeable](resource: Resource) {

    def autoClose[Result](function: Resource => Result): Result = Closer.autoClose(resource)(function)
  }
} 
Example 159
Source File: Assertions.scala    From embedded-kafka   with Apache License 2.0
package com.tuplejump.embedded.kafka

import java.util.concurrent.TimeoutException

import scala.annotation.tailrec
import scala.util.control.NonFatal


trait Assertions {

  def eventually[T](timeout: Long, interval: Long)(func: => T): T = {
    def makeAttempt(): Either[Throwable, T] = {
      try Right(func) catch {
        case NonFatal(e) => Left(e)
      }
    }

    val startTime = System.currentTimeMillis()
    @tailrec
    def tryAgain(attempt: Int): T = {
      makeAttempt() match {
        case Right(result) => result
        case Left(e) =>
          val duration = System.currentTimeMillis() - startTime
          if (duration < timeout) {
            Thread.sleep(interval)
          } else {
            throw new TimeoutException(e.getMessage)
          }

          tryAgain(attempt + 1)
      }
    }

    tryAgain(1)
  }

} 
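A usage sketch (both timings are in milliseconds, matching the signature above):

object AssertionsExample extends Assertions {
  // Re-run the block every 100 ms until it stops throwing, for at most 5 s;
  // after that, the last failure surfaces as a TimeoutException.
  val port: Int = eventually(timeout = 5000L, interval = 100L) {
    val candidate = scala.util.Random.nextInt(1000)
    require(candidate > 900, s"candidate $candidate too small")
    candidate
  }
}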
Example 160
Source File: CompositeRecordBuilder.scala    From spark-records   with Apache License 2.0
package com.swoop.spark.records

import scala.util.Try
import scala.util.control.NonFatal



  // Note: the enclosing abstract builder class (defining Rec, recordBuilder,
  // buildRecords, collectInputStatistics and collectIssueStats) is elided in
  // this excerpt.
  def build: TraversableOnce[Rec] = {
    collectInputStatistics()
    Try(buildRecords(buildPartition))
      .recover { case NonFatal(ex) => Seq(unhandledException(ex)) }
      .get
  }

  protected def buildPartition(recordInput: RecordInput): TraversableOnce[Rec] =
    Try(recordBuilder(recordInput).build)
      .recover { case NonFatal(ex) => Seq(unhandledException(ex)) }
      .get

  override protected def unhandledException(ex: Throwable): Rec = {
    val record = super.unhandledException(ex)
    Try(collectIssueStats(record))
    record
  }

} 
Example 161
Source File: Resources.scala    From weld-java   with BSD 3-Clause "New" or "Revised" License
package weld

import scala.collection.mutable
import scala.util.control.NonFatal

object Resources {
  def withCleanup[T](f: Resources => T): T = {
    val resources = new Resources
    try {
      val result = f(resources)
      resources.close()
      result
    } catch {
      case caught: Throwable =>
        // We want to throw and see the original exception so swallow exceptions during close.
        resources.close(swallowExceptions = true)
        throw caught
    }
  }
}


// The class declaration and registration helpers are elided in this excerpt;
// the header and backing buffer are restored here (the element type
// AutoCloseable is an assumption).
class Resources {
  private val closeables = mutable.Buffer.empty[AutoCloseable]

  def close(swallowExceptions: Boolean = false): Unit = {
    var throwable: Throwable = null
    closeables.foreach { closeable =>
      try closeable.close() catch {
        case NonFatal(e) =>
          if (!swallowExceptions && throwable == null) {
            throwable = e
          }
      }
    }
    if (throwable != null) {
      throw throwable
    }
  }
} 
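A usage sketch of withCleanup (the registration helpers are elided from the excerpt above, so only the control flow is shown):

val result: Int = Resources.withCleanup { resources =>
  // ... acquire closeables here and register them with `resources` ...
  42 // the block's value is returned after a successful close()
}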
Example 162
Source File: ResilientStream.scala    From fs2-rabbit   with Apache License 2.0
package dev.profunktor.fs2rabbit.resiliency

import cats.effect.{Sync, Timer}
import cats.syntax.apply._
import dev.profunktor.fs2rabbit.effects.Log
import fs2.Stream

import scala.concurrent.duration._
import scala.util.control.NonFatal


object ResilientStream {

  def runF[F[_]: Log: Sync: Timer](program: F[Unit], retry: FiniteDuration = 5.seconds): F[Unit] =
    run(Stream.eval(program), retry)

  def run[F[_]: Log: Sync: Timer](
      program: Stream[F, Unit],
      retry: FiniteDuration = 5.seconds
  ): F[Unit] =
    loop(program, retry, 1).compile.drain

  private def loop[F[_]: Log: Sync: Timer](
      program: Stream[F, Unit],
      retry: FiniteDuration,
      count: Int
  ): Stream[F, Unit] =
    program.handleErrorWith {
      case NonFatal(err) =>
        Stream.eval(Log[F].error(err.getMessage) *> Log[F].info(s"Restarting in ${retry.toSeconds * count}...")) >>
          loop[F](Stream.sleep(retry) >> program, retry, count + 1)
    }

} 
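A minimal usage sketch, assuming Log, Sync and Timer instances for the effect type are already in scope:

import cats.effect.{IO, Timer}
import dev.profunktor.fs2rabbit.effects.Log
import scala.concurrent.duration._

// Restarts `program` on any non-fatal error, waiting retry * attempt-count
// between attempts, per the loop above.
def supervised(program: IO[Unit])(implicit L: Log[IO], T: Timer[IO]): IO[Unit] =
  ResilientStream.runF(program, retry = 10.seconds)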
Example 163
Source File: Scalac.scala    From borer   with Mozilla Public License 2.0
package io.bullet.borer

import scala.reflect.macros.blackbox
import scala.util.control.NonFatal
import scala.util.matching.Regex

object Scalac {

  sealed trait TypeCheck {
    def assertErrorMsgMatches(string: String): Unit
    def assertErrorMsgMatches(regex: Regex): Unit
  }

  object TypeCheck {

    final case class Result(code: String, tpe: String) extends TypeCheck {
      def assertErrorMsgMatches(string: String): Unit = assertErrorMsgMatches(null: Regex)

      def assertErrorMsgMatches(regex: Regex): Unit =
        sys.error(s"Code Fragment compiled without error to an expression of type `$tpe`:\n\n$code")
    }

    final case class Error(msg: String) extends TypeCheck {
      def assertErrorMsgMatches(string: String): Unit = assert(msg == string, string)
      def assertErrorMsgMatches(regex: Regex): Unit   = assert(regex.findAllIn(msg).hasNext, regex)

      private def assert(value: Boolean, expected: Any): Unit =
        if (!value) sys.error(s"Expected compiler error matching [$expected] but got [$msg]")
    }
  }

  
  def typecheck(codeFragment: String): TypeCheck = macro Macro.typecheck

  private object Macro {

    def typecheck(c: blackbox.Context)(codeFragment: c.Tree): c.Tree = {
      import c.universe._

      val fragment = codeFragment match {
        case Literal(Constant(x: String)) => x
        case _                            => c.abort(c.enclosingPosition, "`codeFragment` argument must be a literal string")
      }

      try {
        val name0 = TermName(c.freshName)
        val name1 = TermName(c.freshName)
        c.typecheck(c.parse(s"object $name0 { val $name1 = { $fragment } }")) match {
          case ModuleDef(_, _, Template(_, _, List(_, valDef: ValDef, defDef: DefDef))) =>
            val tpe = defDef.symbol.asMethod.returnType.toString
            q"_root_.io.bullet.borer.Scalac.TypeCheck.Result(${showCode(valDef.rhs)}, $tpe)"
          case x => c.abort(c.enclosingPosition, s"Unexpected scalac result:\n\n${showCode(x)}")
        }
      } catch {
        case NonFatal(e) => q"_root_.io.bullet.borer.Scalac.TypeCheck.Error(${e.getMessage})"
      }
    }
  }
} 
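A usage sketch from a test (the expected-message regex is illustrative):

import io.bullet.borer.Scalac

// The fragment is typechecked while this call site compiles; if it compiles
// cleanly, assertErrorMsgMatches fails with the inferred type and code.
Scalac.typecheck("""val x: String = 42""").assertErrorMsgMatches("type mismatch.*".r)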
Example 164
Source File: NlpAnnotate.scala    From ike   with Apache License 2.0
package org.allenai.ike.index

import org.allenai.nlpstack.chunk.{ defaultChunker => chunker }
import org.allenai.nlpstack.core._
import org.allenai.nlpstack.lemmatize.{ MorphaStemmer => lemmatizer }
import org.allenai.nlpstack.postag.{ defaultPostagger => postagger }
import org.allenai.nlpstack.segment.{ defaultSegmenter => segmenter }
import org.allenai.nlpstack.tokenize.{ defaultTokenizer => tokenizer }

import scala.util.control.NonFatal

object NlpAnnotate {
  def segment(text: String): Seq[Segment] = segmenter.segment(text).toSeq

  def tokenize(segment: Segment): Seq[Token] = tokenizer.tokenize(segment.text)

  def postag(tokens: Seq[Token]): Seq[PostaggedToken] = postagger.postagTokenized(tokens)

  def chunk(tokens: Seq[PostaggedToken]): Seq[ChunkedToken] = chunker.chunkPostagged(tokens)

  def addEndingMarkers(tokens: Seq[ChunkedToken]): Seq[ChunkedToken] = {
    if (tokens.isEmpty) {
      List()
    } else {
      def swI(x: String) = x.startsWith("I-")
      def swB(x: String) = x.startsWith("B-")

      (tokens.sliding(2).toList :+ Seq(tokens.last)).map {
        case Seq(ChunkedToken(a, b, c, d), ChunkedToken(x, _, _, _)) if swI(a) && swB(x) =>
          ChunkedToken("E-" + a.substring(2), b, c, d)
        case Seq(ChunkedToken(a, b, c, d), ChunkedToken(x, _, _, _)) if swB(a) && swB(x) =>
          ChunkedToken("BE-" + a.substring(2), b, c, d)
        case Seq(ChunkedToken(a, b, c, d), ChunkedToken(x, _, _, _)) =>
          ChunkedToken(a, b, c, d)
        case Seq(ChunkedToken(a, b, c, d)) if swB(a) =>
          ChunkedToken("BE-" + a.substring(2), b, c, d)
        case Seq(ChunkedToken(a, b, c, d)) =>
          ChunkedToken(a, b, c, d)
      }
    }
  }

  def lemmatize(chunked: Seq[ChunkedToken]): Seq[Lemmatized[ChunkedToken]] =
    chunked.map(lemmatizer.lemmatizePostaggedToken)

  def annotate(text: String): Seq[Seq[Lemmatized[ChunkedToken]]] = segment(text).flatMap {
    segment =>
      val tokens = tokenize(segment)
      val tagged = postag(tokens)
      try {
        val chunked = chunk(tagged)
        val chunkedWithEndingMarkers = addEndingMarkers(chunked)
        Some(lemmatize(chunkedWithEndingMarkers))
      } catch {
        case NonFatal(e) => None
      }
  }
} 
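A usage sketch (nlpstack's Lemmatized wraps each chunked token together with its lemma):

val sentences = NlpAnnotate.annotate("Rainfall declined sharply. Crop yields fell.")
for {
  sentence   <- sentences
  lemmatized <- sentence
} println(s"${lemmatized.token.string}\t${lemmatized.token.postag}\t${lemmatized.lemma}")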
Example 165
Source File: ExampleData.scala    From cuesheet   with Apache License 2.0
package com.kakao.cuesheet.examples.util

import java.io.FileOutputStream

import com.google.common.io.{ByteStreams, Files}

import scala.util.control.NonFatal

object ExampleData {
  lazy val path: String = {
    try {
      val resource = "data.tsv"
      val tmpfile = Files.createTempDir().getAbsolutePath + resource
      val input = getClass.getResourceAsStream(resource)
      val output = new FileOutputStream(tmpfile)
      ByteStreams.copy(input, output)
      input.close()
      output.close()
      tmpfile
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException("Could not copy example data file to temp directory", e)
    }
  }
} 
Example 166
Source File: JSFacade.scala    From metarpheus   with MIT License
package io.buildo.metarpheus
package core

import scala.scalajs.js
import scala.scalajs.js.annotation._
import scala.util.control.NonFatal

import io.circe._
import io.circe.syntax._
import io.circe.parser.decode
import io.circe.generic.extras._
import io.circe.generic.extras.auto._

object JSFacade {

  trait JSConfig extends js.Object {
    val modelsForciblyInUse: js.UndefOr[js.Array[String]]
  }

  @JSExportTopLevel("run")
  def run(paths: js.Array[String], jsConfig: js.UndefOr[JSConfig]) = {
    implicit val circeConfiguration: Configuration =
      Configuration.default.withDefaults.withDiscriminator("_type")
    val config = jsConfig
      .map { jsConfig =>
        val json = js.JSON.stringify(jsConfig)
        decode[Config](json) match {
          case Left(error) => throw js.JavaScriptException(error.toString)
          case Right(config) => config
        }
      }
      .getOrElse(Config.default)

    try {
      val result = Metarpheus.run(paths.toList, config)
      val printer = Printer.noSpaces.copy(dropNullKeys = true)
      js.JSON.parse(printer.pretty(result.asJson))
    } catch {
      case NonFatal(e) => throw js.JavaScriptException(e.getMessage)
    }
  }

} 
Example 167
Source File: Last.scala    From eff   with MIT License
package org.atnos.eff

import cats._
import cats.implicits._

import scala.util.control.NonFatal


// The case class declaration is restored from context; other combinators are
// elided in this excerpt.
case class Last[R](value: Option[Eval[Eff[R, Unit]]]) {

  def interpret[U](n: Eff[R, Unit] => Eff[U, Unit]): Last[U] =
    Last[U](value.map(v => v.map(n)))

  def interpretEff[U](n: Last[R] => Eff[U, Unit]): Last[U] =
    Last.eff(n(this))

  def <*(last: Last[R]): Last[R] =
    (value, last.value) match {
      case (None, None)       => this
      case (Some(r), None)    => this
      case (None, Some(l))    => last
      case (Some(r), Some(l)) => Last(Option(r *> l))
    }

  def *>(last: Last[R]): Last[R] =
    (value, last.value) match {
      case (None, None)       => this
      case (Some(r), None)    => this
      case (None, Some(l))    => last
      case (Some(r), Some(l)) => Last(Option(r *> l))
    }
}

object Last {

  def none[R]: Last[R] =
    Last(None)

  def eff[R](e: =>Eff[R, Unit]): Last[R] =
    Last(Option(Eval.later(evaluate(e))))

  def evaluate[R](e: =>Eff[R, Unit]): Eff[R, Unit] =
    try e
    catch { case NonFatal(t) =>
       if (sys.props.isDefinedAt("eff.debuglast"))
         println("executing one last eff action failed\n"+t.getStackTrace.mkString("\n"))
        Eff.pure[R, Unit](())
    }

} 
Example 168
Source File: ReaderError.scala    From tethys   with Apache License 2.0
package tethys.readers

import scala.util.control.NonFatal

final class ReaderError protected(message: String, cause: Throwable, field: String) extends Exception(message, cause)

object ReaderError {
  def wrongJson(reason: String, cause: Throwable = null)(implicit fieldName: FieldName): Nothing = {
    val field = fieldName.value()
    throw new ReaderError(
      message = s"Illegal json at '$field': $reason",
      cause = cause,
      field = field
    )
  }

  def catchNonFatal[A](fun: => A)(implicit fieldName: FieldName): Either[ReaderError, A] = {
    try Right(fun) catch {
      case err: ReaderError => Left(err)
      case NonFatal(e) => Left(new ReaderError(
        message = e.getMessage,
        cause = e,
        field = fieldName.value())
      )
    }
  }
} 
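A usage sketch (the implicit FieldName is supplied by the surrounding decoding context):

import tethys.readers.{FieldName, ReaderError}

def parsePort(raw: String)(implicit fieldName: FieldName): Either[ReaderError, Int] =
  ReaderError.catchNonFatal(raw.toInt) // a NumberFormatException becomes a Left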
Example 169
Source File: ExceptionHandling.scala    From akka-http-extensions   with Apache License 2.0
package com.lonelyplanet.akka.http.extensions

import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import com.lonelyplanet.akka.http.extensions.exceptions.ResourceNotFound
import com.lonelyplanet.akka.http.extensions.rejections.BadParameterRejection
import com.lonelyplanet.util.logging.Loggable
import org.zalando.jsonapi.Jsonapi
import org.zalando.jsonapi.json.akka.http.AkkaHttpJsonapiSupport._
import spray.json._

import scala.util.control.NonFatal

trait ExceptionHandling extends Loggable {

  def timeoutResponse(traceToken: Option[String]): HttpResponse = {
    HttpResponse(
      status = StatusCodes.GatewayTimeout,
      entity = HttpEntity(
        contentType = MediaTypes.`application/vnd.api+json`,
        string = Jsonapi
          .asRootObject(ErrorMessage("There was a timeout processing your request", traceToken))
          .toJson
          .compactPrint
      )
    )
  }

  implicit def rejectionHandler: RejectionHandler =
    RejectionHandler.newBuilder()
      .handle {
        case BadParameterRejection(message) =>
          optionalHeaderValueByName("x-trace-token") { traceToken =>
            complete {
              (BadRequest, ErrorMessage("Invalid query parameter value", traceToken, Some(message)))
            }
          }
      }.handleAll[MethodRejection] { methodRejections =>
        val supportedMethods = methodRejections.map(_.supported.name)
        optionalHeaderValueByName("x-trace-token") { traceToken =>
          complete {
            (MethodNotAllowed, ErrorMessage(s"HTTP method not allowed, supported only: ${supportedMethods mkString " or "}", traceToken))
          }
        }
      }
      .handleNotFound {
        optionalHeaderValueByName("x-trace-token") { traceToken =>
          complete {
            (NotFound, ErrorMessage(s"Requested resource not found", traceToken))
          }
        }
      }
      .result()

  private def internalServerErrorResponse(traceToken: Option[String]) = {
    (InternalServerError, ErrorMessage(s"Error occurred while processing the request", traceToken))
  }

  private def resourceNotFoundResponse(message: String, traceToken: Option[String]) = {
    (NotFound, ErrorMessage(message, traceToken))
  }

  implicit def exceptionHandler: ExceptionHandler =
    ExceptionHandler {
      case e: ResourceNotFound =>
        optionalHeaderValueByName("x-trace-token") { traceToken =>
          complete(resourceNotFoundResponse(e.getMessage, traceToken))
        }
      case NonFatal(e) =>
        optionalHeaderValueByName("x-trace-token") { traceToken =>
          extractUri { uri =>
            logger.error(s"Request to $uri could not be handled", e)
            complete(internalServerErrorResponse(traceToken))
          }
        }
    }
} 
Example 170
Source File: KamonTracingDirectives.scala    From akka-http-extensions   with Apache License 2.0
package com.lonelyplanet.akka.http.extensions.tracing

import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives.{BasicDirectives, ExecutionDirectives, HeaderDirectives}
import kamon.Kamon
import kamon.trace.Tracer

import scala.util.control.NonFatal

trait KamonTracingDirectives extends BasicDirectives with ExecutionDirectives with HeaderDirectives {

  def withTraceToken(maybeTraceToken: Option[String]): Directive[Unit] = extractRequestContext.flatMap { ctx =>
    Tracer.setCurrentContext(
      Kamon.tracer.newContext(name = "undefined", token = maybeTraceToken)
    )

    mapResponse { resp =>
      clearCurrentContext()
      resp
    } & handleExceptions {
      ExceptionHandler {
        case NonFatal(e) =>
          clearCurrentContext()
          throw e
      }
    } & mapRejections { r =>
      clearCurrentContext()
      r
    }
  }

  private def clearCurrentContext() = {
    if (!Tracer.currentContext.isClosed) {
      Tracer.currentContext.finish()
      Tracer.clearCurrentContext
    }
  }
} 
Example 171
Source File: AnalyzerRule.scala    From scala-commons   with MIT License
package com.avsystem.commons
package analyzer

import java.io.{PrintWriter, StringWriter}

import scala.tools.nsc.Global
import scala.util.control.NonFatal

abstract class AnalyzerRule(val global: Global, val name: String, defaultLevel: Level = Level.Warn) {

  import global._

  var level: Level = defaultLevel
  var argument: String = _

  protected def classType(fullName: String): Type =
    try rootMirror.staticClass(fullName).asType.toType.erasure catch {
      case _: ScalaReflectionException => NoType
    }

  protected def analyzeTree(fun: PartialFunction[Tree, Unit])(tree: Tree): Unit =
    try fun.applyOrElse(tree, (_: Tree) => ()) catch {
      case NonFatal(t) =>
        val sw = new StringWriter
        t.printStackTrace(new PrintWriter(sw))
        reporter.error(tree.pos, s"Analyzer rule $this failed: " + sw.toString)
    }

  private def adjustMsg(msg: String): String = s"[AVS] $msg"

  protected def report(pos: Position, message: String): Unit =
    level match {
      case Level.Off =>
      case Level.Info => reporter.echo(pos, adjustMsg(message))
      case Level.Warn => reporter.warning(pos, adjustMsg(message))
      case Level.Error => reporter.error(pos, adjustMsg(message))
    }

  def analyze(unit: CompilationUnit): Unit

  override def toString: String =
    getClass.getSimpleName
}

sealed trait Level
object Level {
  case object Off extends Level
  case object Info extends Level
  case object Warn extends Level
  case object Error extends Level
} 
Example 172
Source File: CirceYaml.scala    From bazel-deps   with MIT License
package com.github.johnynek.bazel_deps

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import io.circe.jackson.CirceJsonModule
import io.circe.{Decoder, Json, ParsingFailure, Parser}
import scala.util.control.NonFatal


object Yaml extends Parser {
  private[this] val mapper = new ObjectMapper(new YAMLFactory()).registerModule(CirceJsonModule)
  private[this] val factory = mapper.getFactory
  override def parse(input: String): Either[ParsingFailure, Json] =
    try {
      Right(mapper.readValue(factory.createParser(input), classOf[Json]))
    } catch {
      case NonFatal(error) => Left(ParsingFailure(error.getMessage, error))
    }
} 
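A usage sketch of the parser:

import com.github.johnynek.bazel_deps.Yaml

Yaml.parse("name: bazel-deps\nreplicas: 3") match {
  case Right(json)   => println(json.spaces2)
  case Left(failure) => println(s"YAML error: ${failure.message}")
}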
Example 173
Source File: Resolver.scala    From bazel-deps   with MIT License
package com.github.johnynek.bazel_deps

import scala.collection.immutable.SortedMap
import scala.util.Try
import scala.util.control.NonFatal
import cats.MonadError
import cats.implicits._

case class ResolveFailure(message: String,
  m: MavenCoordinate,
  extension: String,
  failures: List[Exception]) extends Exception(message)

trait Resolver[F[_]] {
  implicit def resolverMonad: MonadError[F, Throwable]

  def getShas(m: List[MavenCoordinate]): F[SortedMap[MavenCoordinate, ResolvedShasValue]]

  // Build the entire transitive graph of a set of coordinates
  def buildGraph(coords: List[MavenCoordinate], m: Model): F[Graph[MavenCoordinate, Unit]]

  def run[A](fa: F[A]): Try[A]
}

trait SequentialResolver[F[_]] extends Resolver[F] {
  // This transitively adds the entire reachable graph of dep
  // to the current deps.
  def addToGraph(deps: Graph[MavenCoordinate, Unit], dep: MavenCoordinate, m: Model): F[Graph[MavenCoordinate, Unit]]

  def addAll(deps: Graph[MavenCoordinate, Unit], coords: List[MavenCoordinate], m: Model): F[Graph[MavenCoordinate, Unit]] =
    coords.foldM(deps)(addToGraph(_, _, m))

  def buildGraph(coords: List[MavenCoordinate], m: Model): F[Graph[MavenCoordinate, Unit]] =
    addAll(Graph.empty, coords, m)
} 
Example 174
Source File: BigQueryPartitionUtils.scala    From spark-bigquery   with Apache License 2.0
package com.samelamin.spark.bigquery.utils

import com.google.api.client.googleapis.json.GoogleJsonResponseException
import com.google.api.services.bigquery.Bigquery
import com.google.api.services.bigquery.model.{Table, TableReference, TableSchema, TimePartitioning}
import com.google.cloud.hadoop.io.bigquery.BigQueryStrings
import org.apache.log4j.LogManager

import scala.util.control.NonFatal


class BigQueryPartitionUtils(bqService: Bigquery)  {
  private val logger = LogManager.getRootLogger()
  val DEFAULT_TABLE_EXPIRATION_MS = 259200000L

  def createBigQueryPartitionedTable(targetTable: TableReference,
                                     timePartitionExpiration: Long = 0,
                                     tableSchema: TableSchema = null,
                                     timePartitioningField:String = null): Any = {
    val fullyQualifiedOutputTableId = BigQueryStrings.toString(targetTable)
    val decoratorsRegex = ".+?(?=\\$)".r
    val cleanTableName = BigQueryStrings
    .parseTableReference(decoratorsRegex.findFirstIn(fullyQualifiedOutputTableId)
    .getOrElse(fullyQualifiedOutputTableId))
    val projectId = cleanTableName.getProjectId
    val datasetId = cleanTableName.getDatasetId
    val tableId = cleanTableName.getTableId
    if(doesTableAlreadyExist(projectId,datasetId,tableId)) {
      return
    } else {
      logger.info(s"Creating Table $tableId")
      val table = new Table()
      table.setTableReference(cleanTableName)
      val timePartitioning = new TimePartitioning()
      timePartitioning.setType("DAY")
      if(timePartitioningField != null) {
        timePartitioning.setField(timePartitioningField)
      }
      table.setTimePartitioning(timePartitioning)
      if (timePartitionExpiration > 0) {
        table.setExpirationTime(timePartitionExpiration)
      }
      table.setSchema(tableSchema)
      bqService.tables().insert(cleanTableName.getProjectId, cleanTableName.getDatasetId, table).execute()
      logger.info(s"Table $tableId created")
    }

  }

  def doesTableAlreadyExist(projectId: String, datasetId: String, tableId: String): Boolean = {
    try {
      bqService.tables().get(projectId,datasetId,tableId).execute()
      return true
    } catch {
      case e: GoogleJsonResponseException if e.getStatusCode == 404 =>
        logger.info(s"$projectId:$datasetId.$tableId does not exist")
        return false
      case NonFatal(e) => throw e
    }
  }
} 
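
Note the case ordering in doesTableAlreadyExist: the expected GoogleJsonResponseException must be matched before NonFatal, otherwise the catch-all would swallow the 404. A minimal sketch of the same pattern, with hypothetical names:

import scala.util.control.NonFatal

// existsOrNotFound is an invented name; it shows the "expected failure
// first, NonFatal last" ordering used above.
def existsOrNotFound(lookup: () => Unit): Boolean =
  try { lookup(); true }
  catch {
    case _: java.util.NoSuchElementException => false   // the expected miss
    case NonFatal(e)                         => throw e // everything else propagates
  }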
Example 175
Source File: ElasticSearchBaseClient.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.commons.es.client

import akka.http.scaladsl.model.StatusCodes.GatewayTimeout
import akka.http.scaladsl.model.{HttpRequest, StatusCode, StatusCodes}
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchBaseClient._
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchFailure.{ElasticServerError, ElasticUnexpectedError}
import ch.epfl.bluebrain.nexus.commons.http.HttpClient.UntypedHttpClient
import ch.epfl.bluebrain.nexus.sourcing.RetryStrategyConfig
import com.typesafe.scalalogging.Logger
import retry.CatsEffect._
import retry.syntax.all._
import retry.{RetryDetails, RetryPolicy}

import scala.util.control.NonFatal


// Note: the enclosing class declaration was truncated in this listing; a
// minimal header (constructor details omitted) is assumed here so the
// snippet parses.
abstract class ElasticSearchBaseClient[F[_]] {

  private[client] def sanitize(index: String, allowWildCard: Boolean): String = {
    val regex = if (allowWildCard) """[\s|"|\\|<|>|\||,|/|?]""" else """[\s|"|*|\\|<|>|\||,|/|?]"""
    index.replaceAll(regex, "_").dropWhile(_ == '_')
  }
}

object ElasticSearchBaseClient {
  private[client] val docType           = "_doc"
  private[client] val source            = "_source"
  private[client] val anyIndexPath      = "_all"
  private[client] val ignoreUnavailable = "ignore_unavailable"
  private[client] val allowNoIndices    = "allow_no_indices"
  private[client] val trackTotalHits    = "track_total_hits"
} 
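
A rough illustration of sanitize (inputs invented; the calls would have to live inside the same client package, since the method is private[client]):

sanitize("some index/name", allowWildCard = false) // "some_index_name"
sanitize("*logs-2020*", allowWildCard = true)      // "*logs-2020*" (wildcard kept)
sanitize("*logs-2020*", allowWildCard = false)     // "logs-2020_" (leading '_' dropped)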
Example 176
Source File: AbstractHttpClient.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.clients

import cats.effect.{Sync, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.CliError.ClientError
import ch.epfl.bluebrain.nexus.cli.CliError.ClientError.{SerializationError, Unexpected}
import ch.epfl.bluebrain.nexus.cli.config.EnvConfig
import ch.epfl.bluebrain.nexus.cli.{logRetryErrors, ClientErrOr, Console}
import io.circe.Decoder
import org.http4s.circe.CirceEntityDecoder._
import org.http4s.client.Client
import org.http4s.{Request, Response}
import retry.CatsEffect._
import retry.RetryPolicy
import retry.syntax.all._

import scala.reflect.ClassTag
import scala.util.control.NonFatal

class AbstractHttpClient[F[_]: Timer](client: Client[F], env: EnvConfig)(implicit
    protected val F: Sync[F],
    protected val console: Console[F]
) {

  protected val retry                                = env.httpClient.retry
  protected def successCondition[A]                  = retry.condition.notRetryFromEither[A] _
  implicit protected val retryPolicy: RetryPolicy[F] = retry.retryPolicy
  implicit protected def logOnError[A]               = logRetryErrors[F, A]("interacting with an HTTP API")

  protected def executeDiscard[A](req: Request[F], returnValue: => A): F[ClientErrOr[A]] =
    execute(req, _.body.compile.drain.as(Right(returnValue)))

  protected def executeParse[A: Decoder](req: Request[F])(implicit A: ClassTag[A]): F[ClientErrOr[A]] =
    execute(
      req,
      _.attemptAs[A].value.map(
        _.leftMap(err =>
          SerializationError(err.message, s"The response payload was not of type '${A.runtimeClass.getSimpleName}'")
        )
      )
    )

  private def execute[A](req: Request[F], f: Response[F] => F[ClientErrOr[A]]): F[ClientErrOr[A]] =
    client
      .fetch(req)(ClientError.errorOr[F, A](r => f(r)))
      .recoverWith {
        case NonFatal(err) => F.delay(Left(Unexpected(Option(err.getMessage).getOrElse("").take(30))))
      }
      .retryingM(successCondition[A])
} 
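
A hypothetical subclass sketch, reusing the imports from the snippet above, showing how executeParse would typically be invoked (ProjectsClient and fetch are invented names):

import org.http4s.{Method, Uri}

class ProjectsClient[F[_]: Timer](client: Client[F], env: EnvConfig)(implicit
    F: Sync[F],
    console: Console[F]
) extends AbstractHttpClient[F](client, env) {

  // GET a JSON document and decode it as A; failures are retried and
  // surfaced as ClientErrOr[A] by the base class.
  def fetch[A: Decoder: ClassTag](uri: Uri): F[ClientErrOr[A]] =
    executeParse[A](Request[F](method = Method.GET, uri = uri))
}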
Example 177
Source File: Routes.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.routes

import akka.http.scaladsl.model.headers.{`WWW-Authenticate`, HttpChallenges}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route}
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient.Caller
import ch.epfl.bluebrain.nexus.storage.StorageError._
import ch.epfl.bluebrain.nexus.storage.config.AppConfig
import ch.epfl.bluebrain.nexus.storage.config.AppConfig._
import ch.epfl.bluebrain.nexus.storage.routes.AuthDirectives._
import ch.epfl.bluebrain.nexus.storage.routes.PrefixDirectives._
import ch.epfl.bluebrain.nexus.storage.routes.instances._
import ch.epfl.bluebrain.nexus.storage.{AkkaSource, IamIdentitiesClient, Rejection, StorageError, Storages}
import com.typesafe.scalalogging.Logger
import monix.eval.Task

import scala.util.control.NonFatal

object Routes {

  private[this] val logger = Logger[this.type]

  
  def apply(
      storages: Storages[Task, AkkaSource]
  )(implicit config: AppConfig, identities: IamIdentitiesClient[Task]): Route =
    //TODO: Fetch Bearer token and verify identity
    wrap {
      concat(
        AppInfoRoutes(config.description).routes,
        (pathPrefix(config.http.prefix) & extractToken) { implicit token =>
          extractCaller.apply {
            case Caller(config.subject.subjectValue, _) => StorageRoutes(storages).routes
            case _                                      => failWith(AuthenticationFailed)
          }
        }
      )
    }

} 
Example 178
Source File: AttributesCache.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.Path
import java.time.Clock

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.{ask, AskTimeoutException}
import akka.util.Timeout
import cats.effect.{ContextShift, Effect, IO}
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.FileAttributes
import ch.epfl.bluebrain.nexus.storage.StorageError.{InternalError, OperationTimedOut}
import ch.epfl.bluebrain.nexus.storage.attributes.AttributesCacheActor.Protocol._
import ch.epfl.bluebrain.nexus.storage.config.AppConfig.DigestConfig
import com.typesafe.scalalogging.Logger

import scala.util.control.NonFatal

trait AttributesCache[F[_]] {

  
  // `get` restored from the implementation below (the abstract member was
  // dropped in this listing).
  def get(filePath: Path): F[FileAttributes]

  def asyncComputePut(filePath: Path, algorithm: String): Unit
}

object AttributesCache {
  private[this] val logger = Logger[this.type]

  def apply[F[_], Source](implicit
      system: ActorSystem,
      clock: Clock,
      tm: Timeout,
      F: Effect[F],
      computation: AttributesComputation[F, Source],
      config: DigestConfig
  ): AttributesCache[F] =
    apply(system.actorOf(AttributesCacheActor.props(computation)))

  private[attributes] def apply[F[_]](
      underlying: ActorRef
  )(implicit system: ActorSystem, tm: Timeout, F: Effect[F]): AttributesCache[F] =
    new AttributesCache[F] {
      implicit private val contextShift: ContextShift[IO] = IO.contextShift(system.dispatcher)

      override def get(filePath: Path): F[FileAttributes] =
        IO.fromFuture(IO.shift(system.dispatcher) >> IO(underlying ? Get(filePath)))
          .to[F]
          .flatMap[FileAttributes] {
            case attributes: FileAttributes => F.pure(attributes)
            case other                      =>
              logger.error(s"Received unexpected reply from the file attributes cache: '$other'")
              F.raiseError(InternalError("Unexpected reply from the file attributes cache"))
          }
          .recoverWith {
            case _: AskTimeoutException =>
              F.raiseError(OperationTimedOut("reply from the file attributes cache timed out"))
            case NonFatal(th)           =>
              logger.error("Exception caught while exchanging messages with the file attributes cache", th)
              F.raiseError(InternalError("Exception caught while exchanging messages with the file attributes cache"))
          }

      override def asyncComputePut(filePath: Path, algorithm: String): Unit =
        underlying ! Compute(filePath)

    }
} 
Example 179
Source File: ShoppingCartClient.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.ActorMaterializer
import com.example.shoppingcart.shoppingcart._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.control.NonFatal

object ShoppingCartClient {
  def main(args: Array[String]): Unit = {

    val client = new ShoppingCartClient("localhost", 9000, None)

    val userId = "viktor"
    val productId = "1337"
    val productName = "h4x0r"

    try {
      println(client.getCart(userId))
      for (_ <- 1 to 8) {
        client.addItem(userId, productId, productName, 1)
      }
      println(client.getCart(userId))
      client.removeItem(userId, productId)
      println(client.getCart(userId))
    } catch {
      case NonFatal(e) => e.printStackTrace()
    } finally {
      try {
        client.shutdown()
      } finally {
        System.exit(0)
      }
    }
  }
}


class ShoppingCartClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())
  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()

  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = com.example.shoppingcart.shoppingcart.ShoppingCartClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getCart(userId: String) = await(service.getCart(GetShoppingCart(userId)))
  def addItem(userId: String, productId: String, name: String, quantity: Int) =
    await(service.addItem(AddLineItem(userId, productId, name, quantity)))
  def removeItem(userId: String, productId: String) = await(service.removeItem(RemoveLineItem(userId, productId)))
} 
Example 180
Source File: PublishData.scala    From scaladex   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package ch.epfl.scala.index
package server
package routes
package api
package impl

import data.{DataPaths, LocalPomRepository}
import data.github
import ch.epfl.scala.index.model.misc.Sha1
import org.joda.time.DateTime
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}

import org.slf4j.LoggerFactory

import scala.util.control.NonFatal


// Note: the enclosing declaration was truncated in this listing; a minimal
// object wrapper is assumed here so the snippet parses.
object PublishData {

  private def tmpPath(sha1: String): Path = {
    val tmpDir =
      Files.createTempDirectory(Paths.get(Server.config.tempDirPath), sha1)
    Files.createTempFile(tmpDir, "", "")
  }
} 
Example 181
Source File: ErrorUtil.scala    From pureconfig   with Mozilla Public License 2.0 5 votes vote down vote up
package pureconfig.backend

import scala.util.control.NonFatal

import com.typesafe.config._
import pureconfig._
import pureconfig.error._


object ErrorUtil {

  def unsafeToReaderResult[A](f: => A, onIOFailure: Option[Option[Throwable] => CannotRead] = None): ConfigReader.Result[A] = {
    try Right(f) catch {
      case e: ConfigException.IO if onIOFailure.nonEmpty =>
        ConfigReader.Result.fail(onIOFailure.get(Option(e.getCause)))

      case e: ConfigException.Parse =>
        val msg = (if (e.origin != null)
          // Removing the error origin from the exception message since origin is stored and used separately:
          e.getMessage.stripPrefix(s"${e.origin.description}: ")
        else
          e.getMessage).stripSuffix(".")
        ConfigReader.Result.fail(CannotParse(msg, Some(e.origin())))

      case e: ConfigException =>
        ConfigReader.Result.fail(ThrowableFailure(e, Some(e.origin())))

      case NonFatal(e) =>
        ConfigReader.Result.fail(ThrowableFailure(e, None))
    }
  }
} 
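
A minimal usage sketch (the config strings are invented): a Typesafe Config call that would throw is converted into a ConfigReader.Result failure instead.

import com.typesafe.config.ConfigFactory

val ok  = ErrorUtil.unsafeToReaderResult(ConfigFactory.parseString("a = 1"))     // Right(config)
val bad = ErrorUtil.unsafeToReaderResult(ConfigFactory.parseString("a = [oops")) // Left(CannotParse ...)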
Example 182
Source File: KubeServiceLocatorServer.scala    From lagom-on-kube   with Apache License 2.0 5 votes vote down vote up
package me.alexray.lagom.kube.discovery

import java.io.Closeable
import java.net.URI
import java.util.{Map => JMap}

import com.lightbend.lagom.gateway.{ServiceGateway, ServiceGatewayConfig, ServiceGatewayFactory}
import me.alexray.lagom.kube.discovery.impl.KubeServiceRegistryModule
import me.alexray.lagom.kube.gateway.{KubeServiceGateway, KubeServiceGatewayConfig, KubeServiceGatewayFactory}
import play.api.Application
import play.api.Logger
import play.api.Mode
import play.api.Play
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.inject.guice.GuiceableModule.fromGuiceModule
import play.core.server.ServerConfig
import play.core.server.ServerProvider
import play.core.server.ServerWithStop

import scala.util.control.NonFatal

class KubeServiceLocatorServer extends Closeable {
  private val logger: Logger = Logger(this.getClass)

  @volatile private var server: ServerWithStop = _
  @volatile private var gateway: KubeServiceGateway = _

  def start(serviceLocatorPort: Int, serviceGatewayPort: Int, unmanagedServices: JMap[String, String]): Unit = synchronized {
    require(server == null, "Service locator is already running on " + server.mainAddress)

    val application = createApplication(KubeServiceGatewayConfig(serviceGatewayPort), unmanagedServices)
    Play.start(application)
    try {
      server = createServer(application, serviceLocatorPort)
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException(s"Unable to start service locator on port $serviceLocatorPort", e)
    }
    try {
      gateway = application.injector.instanceOf[KubeServiceGatewayFactory].start()
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException(s"Unable to start service gateway on port $serviceGatewayPort", e)
    }
    logger.info("Service locator can be reached at " + serviceLocatorAddress)
    logger.info("Service gateway can be reached at " + serviceGatewayAddress)
  }

  private def createApplication(serviceGatewayConfig: KubeServiceGatewayConfig, unmanagedServices: JMap[String, String]): Application = {
    new GuiceApplicationBuilder()
      .overrides(KubeServiceRegistryModule(serviceGatewayConfig, unmanagedServices))
      .build()
  }

  private def createServer(application: Application, port: Int): ServerWithStop = {
    val config = ServerConfig(port = Some(port), mode = Mode.Test)
    val provider = implicitly[ServerProvider]
    provider.createServer(config, application)
  }

  override def close(): Unit = synchronized {
    if (server == null) Logger.logger.debug("Service locator was already stopped")
    else {
      logger.debug("Stopping service locator...")
      server.stop()
      server = null
      logger.info("Service locator stopped")
    }
  }

  def serviceLocatorAddress: URI = {
    // Converting InetSocketAddress into URL is not that simple.
    // Because we know the service locator is running locally, I'm hardcoding the hostname and protocol.
    new URI(s"http://localhost:${server.mainAddress.getPort}")
  }

  def serviceGatewayAddress: URI = {
    new URI(s"http://localhost:${gateway.address.getPort}")
  }
} 
Example 183
Source File: FileUpload.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.publish.upload

import java.nio.file.{Files, Path}

import coursier.core.Authentication
import coursier.paths.Util
import coursier.publish.upload.logger.UploadLogger
import coursier.util.Task

import scala.util.control.NonFatal


final case class FileUpload(base: Path) extends Upload {
  private val base0 = base.normalize()
  def upload(
    url: String,
    authentication: Option[Authentication],
    content: Array[Byte],
    logger: UploadLogger,
    loggingId: Option[Object]
  ): Task[Option[Upload.Error]] = {

    val p = base0.resolve(url).normalize()
    if (p.startsWith(base0))
      Task.delay {
        logger.uploading(url, loggingId, Some(content.length))
        val errorOpt = try {
          Util.createDirectories(p.getParent)
          Files.write(p, content)
          None
        } catch {
          case NonFatal(e) =>
            Some(e)
        }
        logger.uploaded(url, loggingId, errorOpt.map(e => new Upload.Error.FileException(e)))

        None
      }
    else
      Task.fail(new Exception(s"Invalid path: $url (base: $base0, p: $p)"))
  }
} 
Example 184
Source File: FileDownload.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.publish.download

import java.nio.file.{Files, Path}
import java.time.Instant

import coursier.core.Authentication
import coursier.publish.download.logger.DownloadLogger
import coursier.util.Task

import scala.util.control.NonFatal


final case class FileDownload(base: Path) extends Download {
  private val base0 = base.normalize()
  def downloadIfExists(
    url: String,
    authentication: Option[Authentication],
    logger: DownloadLogger
  ): Task[Option[(Option[Instant], Array[Byte])]] = {

    val p = base0.resolve(url).normalize()
    if (p.startsWith(base0))
      Task.delay {
        logger.downloadingIfExists(url)
        val res = try {
          if (Files.isRegularFile(p)) {
            val lastModified = Files.getLastModifiedTime(p).toInstant
            Right(Some((Some(lastModified), Files.readAllBytes(p))))
          } else
            Right(None)
        } catch {
          case NonFatal(e) =>
            Left(e)
        }
        logger.downloadedIfExists(
          url,
          res.toOption.flatMap(_.map(_._2.length)),
          res.left.toOption.map(e => new Download.Error.FileException(e))
        )

        Task.fromEither(res)
      }.flatMap(identity)
    else
      Task.fail(new Exception(s"Invalid path: $url (base: $base0, p: $p)"))
  }
} 
Example 185
Source File: MonadlessEitherThrowable.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.cli.util

import io.monadless.Monadless

import scala.util.control.NonFatal

class MonadlessEitherThrowable extends Monadless[({ type L[T] = Either[Throwable, T] })#L] {

  def apply[T](v: => T): Either[Throwable, T] =
    Right(v)

  def collect[T](list: List[Either[Throwable, T]]): Either[Throwable, List[T]] =
    list
      .foldLeft[Either[Throwable, List[T]]](Right(Nil)) {
        (acc, elem) =>
          for {
            acc0 <- acc
            t <- elem
          } yield t :: acc0
      }
      .map(_.reverse)

  def rescue[T](m: Either[Throwable, T])(pf: PartialFunction[Throwable, Either[Throwable, T]]): Either[Throwable, T] =
    m.left.flatMap { e =>
      if (pf.isDefinedAt(e))
        pf(e)
      else
        Left(e)
    }

  def ensure[T](m: Either[Throwable, T])(f: => Unit): Either[Throwable, T] = {
    try f
    catch {
      case NonFatal(_) => ()
    }
    m
  }

}

object MonadlessEitherThrowable extends MonadlessEitherThrowable 
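
The building blocks can be exercised directly, without the Monadless lift/unlift macros; a minimal sketch with invented values:

val m = MonadlessEitherThrowable

m.collect(List(Right(1), Right(2)))                    // Right(List(1, 2))
m.collect(List(Right(1), Left(new Exception("boom")))) // Left(java.lang.Exception: boom)

m.rescue(Left(new ArithmeticException): Either[Throwable, Int]) {
  case _: ArithmeticException => Right(0)
}                                                      // Right(0)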
Example 186
Source File: StringToTimestampParser.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.infra.parser

import java.text.{DateFormat, SimpleDateFormat}
import java.util.TimeZone

import com.aol.one.dwh.infra.util.{ExceptionPrinter, LogTrait}

import scala.util.control.NonFatal
import scala.util.{Failure, Try}


object StringToTimestampParser extends LogTrait with ExceptionPrinter {

  def parse(value: String, format: String): Option[Long] = {

    Try {
      val dateFormat: DateFormat = new SimpleDateFormat(format)
      dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"))
      dateFormat.parse(value).getTime
    }.recoverWith {
        case NonFatal(e) =>
          logger.error(s"Could not parse value:[$value] using format:[$format]. Catching exception {}", e.getStringStackTrace)
          Failure(e)
    }.toOption
  }
} 
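
A minimal usage sketch (values invented; dates are interpreted in UTC per the setTimeZone call above):

StringToTimestampParser.parse("2019-03-04 10:15:00", "yyyy-MM-dd HH:mm:ss")
// Some(1551694500000L) (epoch millis for 2019-03-04T10:15:00Z)
StringToTimestampParser.parse("not-a-date", "yyyy-MM-dd")
// None (the ParseException is caught via NonFatal and logged)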
Example 187
Source File: BandarlogsFactory.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.bandarlog

import com.aol.one.dwh.bandarlog.connectors.KafkaConnector
import com.aol.one.dwh.bandarlog.metrics._
import com.aol.one.dwh.bandarlog.providers.ProviderFactory
import com.aol.one.dwh.bandarlog.reporters.{CustomTags, MetricReporter, RegistryFactory}
import com.aol.one.dwh.bandarlog.scheduler.Scheduler
import com.aol.one.dwh.infra.config.RichConfig._
import com.aol.one.dwh.infra.kafka.KafkaCluster
import com.aol.one.dwh.infra.sql.pool.ConnectionPoolHolder
import com.aol.one.dwh.infra.util.{ExceptionPrinter, LogTrait}
import com.typesafe.config.Config

import scala.collection.JavaConversions._
import scala.util.control.NonFatal
import scala.util.{Failure, Try}

class BandarlogsFactory(mainConfig: Config) extends LogTrait with ExceptionPrinter {

  def create(): Seq[Bandarlog[_]] = {
    val bandarlogIds = mainConfig.getObject("bandarlogs").keys.toSeq
    logger.info(s"Defined bandarlog ids:[${bandarlogIds.mkString(",")}]")

    val connectionPoolHolder = new ConnectionPoolHolder(mainConfig)

    val bandarlogs = bandarlogIds.map(id => id -> mainConfig.getConfig(s"bandarlogs.$id")).filter { case (id, bandarlogConf) =>
      val enabled = bandarlogConf.isEnabled
      logger.info(s"Bandarlog:[$id] Enabled:[$enabled]")
      enabled
    }.map { case (id, bandarlogConf) =>
      logger.info(s"Creating bandarlog:[$id]...")

      Try {
        val metricProviders = createMetricProviders(bandarlogConf, connectionPoolHolder)
        val reporters = createReporters(bandarlogConf, metricProviders.map(_.metric))
        new Bandarlog(metricProviders, reporters, new Scheduler(bandarlogConf.getSchedulerConfig))

      }.recoverWith {
        case NonFatal(e) =>
          logger.error(s"Can't create bandarlog:[$id]. Catching exception {}", e.getStringStackTrace)
          Failure(e)
      }.toOption
    }

    bandarlogs.filter(_.isDefined).map(_.get)
  }

  private def createMetricProviders(bandarlogConf: Config, connectionPoolHolder: ConnectionPoolHolder) = {
    bandarlogConf.getBandarlogType match {
      case "kafka" => kafkaMetricProviders(bandarlogConf)
      case "sql" => metricProviders(bandarlogConf, connectionPoolHolder)
      case t => throw new IllegalArgumentException(s"Unsupported bandarlog type:[$t]")
    }
  }

  private def kafkaMetricProviders(bandarlogConf: Config) = {
    val metricsPrefix = bandarlogConf.getReportConfig.prefix
    val kafkaConfig = mainConfig.getKafkaConfig(bandarlogConf.getConnector)
    val kafkaConnector = new KafkaConnector(KafkaCluster(kafkaConfig))
    val kafkaMetricFactory = new KafkaMetricFactory(kafkaConnector)

    bandarlogConf.getKafkaTopics.flatMap { topic =>
      bandarlogConf.getMetrics.map(metricId => kafkaMetricFactory.create(metricId, metricsPrefix, topic))
    }
  }

  private def metricProviders(bandarlogConf: Config, connectionPoolHolder: ConnectionPoolHolder) = {
    val metricsPrefix = bandarlogConf.getReportConfig.prefix
    val providerFactory = new ProviderFactory(mainConfig, connectionPoolHolder)
    val metricFactory = new MetricFactory(providerFactory)

    bandarlogConf.getTables.flatMap { case (inTable, outTable) =>
      bandarlogConf.getMetrics.flatMap { metricId =>
        metricFactory.create(metricId, metricsPrefix, bandarlogConf.getInConnector, bandarlogConf.getOutConnectors, inTable, outTable)
      }
    }
  }

  private def createReporters[V](bandarlogConf: Config, metrics: Seq[Metric[V]]) = {
    metrics.flatMap { metric =>
      val tags = metric.tags ++ CustomTags(bandarlogConf)
      val metricRegistry = RegistryFactory.createWithMetric(metric)

      bandarlogConf.getReporters.map(reporter => MetricReporter(reporter, tags, metricRegistry, mainConfig, bandarlogConf.getReportConfig))
    }
  }
} 
Example 188
Source File: Bandarlog.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.bandarlog

import com.aol.one.dwh.bandarlog.metrics.MetricProvider
import com.aol.one.dwh.bandarlog.reporters.MetricReporter
import com.aol.one.dwh.bandarlog.scheduler.Scheduler
import com.aol.one.dwh.infra.util.{ExceptionPrinter, LogTrait}

import scala.util.Try
import scala.util.control.NonFatal


class Bandarlog[V](
    providers: Seq[MetricProvider[V]],
    reporters: Seq[MetricReporter],
    scheduler: Scheduler
  ) extends LogTrait with ExceptionPrinter {

  def execute(): Unit = {
    scheduler.schedule(() =>
      providers.foreach { metricProvider =>
        val metric = metricProvider.metric
        val provider = metricProvider.provider

        Try {
          val value = provider.provide()
          metric.value.setValue(value.getValue)
        }.recover {
          case NonFatal(e) =>
            logger.error("Catching exception {}", e.getStringStackTrace)
            metric.value.setValue(None)
        }

        logger.info(s"Metric:[${metric.prefix}.${metric.name}] Tags:[${metric.tags.mkString(",")}] Value:[${metric.value.getValue}]")
      })

    reporters.foreach(_.start())
  }

  def shutdown(): Unit = {
    reporters.foreach(_.stop())
    scheduler.shutdown()
  }
} 
Example 189
Source File: DeltaLogging.scala    From delta   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.delta.metering

import scala.util.Try
import scala.util.control.NonFatal

import com.databricks.spark.util.{DatabricksLogging, OpType, TagDefinition}
import com.databricks.spark.util.MetricDefinitions.{EVENT_LOGGING_FAILURE, EVENT_TAHOE}
import com.databricks.spark.util.TagDefinitions.{TAG_OP_TYPE, TAG_TAHOE_ID, TAG_TAHOE_PATH}
import org.apache.spark.sql.delta.DeltaLog
import org.apache.spark.sql.delta.util.DeltaProgressReporter
import org.apache.spark.sql.delta.util.JsonUtils


// Note: the trait declaration was truncated in this listing; a minimal header
// is reconstructed here, mixing in the two helpers the body uses
// (recordOperation comes from DatabricksLogging).
trait DeltaLogging extends DeltaProgressReporter with DatabricksLogging {

  protected def recordDeltaOperation[A](
      deltaLog: DeltaLog,
      opType: String,
      tags: Map[TagDefinition, String] = Map.empty)(
      thunk: => A): A = {
    val tableTags = if (deltaLog != null) {
      Map(
        TAG_TAHOE_PATH -> Try(deltaLog.dataPath.toString).getOrElse(null),
        TAG_TAHOE_ID -> Try(deltaLog.snapshot.metadata.id).getOrElse(null))
    } else {
      Map.empty
    }
    recordOperation(
      new OpType(opType, ""),
      extraTags = tableTags ++ tags) {thunk}
  }
} 
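
A hypothetical caller sketch (MyDeltaCommand and the opType string are invented names), showing how recordDeltaOperation wraps a unit of work so it is reported with the table's path and id:

class MyDeltaCommand extends DeltaLogging {
  def run(deltaLog: DeltaLog): Unit =
    recordDeltaOperation(deltaLog, opType = "delta.example.run") {
      // the actual work goes here; the trait times it and tags it with
      // TAG_TAHOE_PATH / TAG_TAHOE_ID
    }
}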
Example 190
Source File: FileMonitor.scala    From better-files   with MIT License 5 votes vote down vote up
package better.files

import java.nio.file._

import scala.concurrent.{blocking, ExecutionContext}
import scala.util.Try
import scala.util.control.NonFatal


// Note: the class declaration was truncated in this listing; a minimal header
// (and the watch service the body uses) is reconstructed so the snippet parses.
abstract class FileMonitor(val root: File, maxDepth: Int) extends File.Monitor {
  protected[this] val service = root.newWatchService

  protected[this] def reactTo(target: File) = root.isDirectory || root.isSamePathAs(target)

  protected[this] def process(key: WatchKey) = {
    val path = key.watchable().asInstanceOf[Path]

    import scala.collection.JavaConverters._
    key.pollEvents().asScala foreach {
      case event: WatchEvent[Path] @unchecked if (event.context() != null) =>
        val target: File = path.resolve(event.context())
        if (reactTo(target)) {
          if (event.kind() == StandardWatchEventKinds.ENTRY_CREATE) {
            val depth = root.relativize(target).getNameCount
            watch(target, (maxDepth - depth) max 0) // auto-watch new files in a directory
          }
          onEvent(event.kind(), target, event.count())
        }
      case event => if (reactTo(path)) onUnknownEvent(event)
    }
    key.reset()
  }

  protected[this] def watch(file: File, depth: Int): Unit = {
    def toWatch: Iterator[File] =
      if (file.isDirectory) {
        file.walk(depth).filter(f => f.isDirectory && f.exists)
      } else {
        when(file.exists)(file.parent).iterator // There is no way to watch a regular file; so watch its parent instead
      }
    try {
      toWatch.foreach(f => Try[Unit](f.register(service)).recover { case e => onException(e) }.get)
    } catch {
      case NonFatal(e) => onException(e)
    }
  }

  override def start()(implicit executionContext: ExecutionContext) = {
    watch(root, maxDepth)
    executionContext.execute(new Runnable {
      override def run() = blocking { Iterator.continually(service.take()).foreach(process) }
    })
  }

  override def close() = service.close()

  // Although this class is abstract, we provide noop implementations so user can choose to implement a subset of these
  override def onCreate(file: File, count: Int)     = {}
  override def onModify(file: File, count: Int)     = {}
  override def onDelete(file: File, count: Int)     = {}
  override def onUnknownEvent(event: WatchEvent[_]) = {}
  override def onException(exception: Throwable)    = {}
} 
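
A usage sketch relying on the reconstructed constructor above (path and callbacks invented):

val monitor = new FileMonitor(File("/tmp/watched"), maxDepth = 1) {
  override def onCreate(file: File, count: Int) = println(s"created: $file")
  override def onDelete(file: File, count: Int) = println(s"deleted: $file")
}
monitor.start()(scala.concurrent.ExecutionContext.global)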
Example 191
Source File: Metrics.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.client

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import cool.graph.cuid.Cuid
import cool.graph.shared.externalServices.KinesisPublisher
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import spray.json.{JsArray, JsBoolean, JsNumber, JsObject, JsString}

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

object FeatureMetric extends Enumeration {
  type FeatureMetric = Value
  val Subscriptions           = Value("backend/api/subscriptions")
  val Filter                  = Value("backend/feature/filter")
  val NestedMutations         = Value("backend/feature/nested-mutation")
  val ApiSimple               = Value("backend/api/simple")
  val ApiRelay                = Value("backend/api/relay")
  val ApiFiles                = Value("backend/api/files")
  val ServersideSubscriptions = Value("backend/feature/sss")
  val RequestPipeline         = Value("backend/feature/rp") // add this!
  val PermissionQuery         = Value("backend/feature/permission-queries") // add this!
  val Authentication          = Value("backend/feature/authentication")
  val Algolia                 = Value("backend/feature/algolia") // add this!
  val Auth0                   = Value("backend/feature/integration-auth0")
  val Digits                  = Value("backend/feature/integration-digits")
}

case class ApiFeatureMetric(ip: String,
                            date: DateTime,
                            projectId: String,
                            clientId: String,
                            usedFeatures: List[String],
                            // Should be false when we can't determine. This is the case for subscriptions.
                            // Is always false for File api.
                            isFromConsole: Boolean)

class FeatureMetricActor(
    metricsPublisher: KinesisPublisher,
    interval: Int
) extends Actor {
  import context.dispatcher

  val metrics = mutable.Buffer.empty[ApiFeatureMetric]
  val FLUSH   = "FLUSH"
  val tick = context.system.scheduler.schedule(
    initialDelay = FiniteDuration(interval, TimeUnit.SECONDS),
    interval = FiniteDuration(interval, TimeUnit.SECONDS),
    receiver = self,
    message = FLUSH
  )

  override def postStop() = tick.cancel()

  def receive = {
    case metric: ApiFeatureMetric =>
      metrics += metric

    case FLUSH =>
      flushMetrics()
  }

  def flushMetrics() = {
    val byProject = metrics.groupBy(_.projectId) map {
      case (projectId, metrics) =>
        JsObject(
          "requestCount"        -> JsNumber(metrics.length),
          "projectId"           -> JsString(projectId),
          "usedIps"             -> JsArray(metrics.map(_.ip).distinct.take(10).toVector.map(JsString(_))),
          "features"            -> JsArray(metrics.flatMap(_.usedFeatures).distinct.toVector.map(JsString(_))),
          "date"                -> JsString(metrics.head.date.toString(DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z").withZoneUTC())),
          "version"             -> JsString("1"),
          "justConsoleRequests" -> JsBoolean(metrics.forall(_.isFromConsole))
        )
    }

    byProject.foreach { json =>
      try {
        metricsPublisher.putRecord(json.toString, shardId = Cuid.createCuid())
      } catch {
        case NonFatal(e) => println(s"Putting kinesis FeatureMetric failed: ${e.getMessage} ${e.toString}")
      }
    }
    metrics.clear()
  }
} 
Example 192
Source File: RemoraApp.scala    From remora   with MIT License 5 votes vote down vote up
import java.io.IOException
import java.net.ConnectException
import java.util.concurrent.{TimeUnit, TimeoutException}

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import com.amazonaws.services.cloudwatch.{AmazonCloudWatchAsync, AmazonCloudWatchAsyncClientBuilder}
import com.blacklocus.metrics.CloudWatchReporterBuilder
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.typesafe.scalalogging.LazyLogging
import config.{KafkaSettings, MetricsSettings}
import kafka.admin.RemoraKafkaConsumerGroupService
import reporter.RemoraDatadogReporter

import scala.concurrent.duration._
import scala.util.control.NonFatal

object RemoraApp extends App with nl.grons.metrics.scala.DefaultInstrumented with LazyLogging {

  private val actorSystemName: String = "remora"
  implicit val actorSystem = ActorSystem(actorSystemName)

  metricRegistry.registerAll(new GarbageCollectorMetricSet)
  metricRegistry.registerAll(new MemoryUsageGaugeSet)
  metricRegistry.registerAll(new ThreadStatesGaugeSet)

  lazy val decider: Supervision.Decider = {
    case _: IOException | _: ConnectException | _: TimeoutException => Supervision.Restart
    case NonFatal(err: Throwable) =>
      actorSystem.log.error(err, "Unhandled Exception in Stream: {}", err.getMessage)
      Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(decider))(actorSystem)

  implicit val executionContext = actorSystem.dispatchers.lookup("kafka-consumer-dispatcher")
  val kafkaSettings = KafkaSettings(actorSystem.settings.config)
  val consumer = new RemoraKafkaConsumerGroupService(kafkaSettings)
  val kafkaClientActor = actorSystem.actorOf(KafkaClientActor.props(consumer), name = "kafka-client-actor")

  Api(kafkaClientActor).start()

  val metricsSettings = MetricsSettings(actorSystem.settings.config)

  if (metricsSettings.registryOptions.enabled) {
    val exportConsumerMetricsToRegistryActor =
      actorSystem.actorOf(ExportConsumerMetricsToRegistryActor.props(kafkaClientActor),
        name = "export-consumer-metrics-actor")
    actorSystem.scheduler.schedule(0 second, metricsSettings.registryOptions.intervalSeconds second, exportConsumerMetricsToRegistryActor, "export")
  }

  if (metricsSettings.cloudWatch.enabled) {
    logger.info("Reporting metricsRegistry to Cloudwatch")
    val amazonCloudWatchAsync: AmazonCloudWatchAsync = AmazonCloudWatchAsyncClientBuilder.defaultClient

    new CloudWatchReporterBuilder()
      .withNamespace(metricsSettings.cloudWatch.name)
      .withRegistry(metricRegistry)
      .withClient(amazonCloudWatchAsync)
      .build()
      .start(metricsSettings.cloudWatch.intervalMinutes, TimeUnit.MINUTES)
  }

  if (metricsSettings.dataDog.enabled) {
    logger.info(s"Reporting metricsRegistry to Datadog at ${metricsSettings.dataDog.agentHost}:${metricsSettings.dataDog.agentPort}")
    val datadogReporter = new RemoraDatadogReporter(metricRegistry, metricsSettings.dataDog)
    datadogReporter.startReporter()
  }

} 
Example 193
Source File: Broker.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.boot

import java.net.ConnectException

import akka.actor.ActorSystem
import com.programmaticallyspeaking.ncd.chrome.domains.EventEmitHook
import com.programmaticallyspeaking.ncd.chrome.net.FilePublisher
import com.programmaticallyspeaking.ncd.config.Conf
import com.programmaticallyspeaking.ncd.host.{ScriptEvent, ScriptHost}
import com.programmaticallyspeaking.ncd.ioc.Container
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.nashorn.{NashornDebugger, NashornDebuggerConnector, NashornScriptHost}
import org.slf4s.Logging

import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

case class BrokerConnection(host: NashornScriptHost, disconnect: () => Unit)

class Broker(conf: Conf)(implicit actorSystem: ActorSystem) extends Logging {
  import scala.concurrent.ExecutionContext.Implicits._

  def connect(errorCallback: Option[Throwable] => Unit): Future[BrokerConnection] = {
    val connectAddr = conf.connect()
    val connector = new NashornDebuggerConnector(connectAddr.host, connectAddr.port)
    val debuggerReady = connector.connect().map(vm => new NashornDebugger().create(vm))

    val connectionPromise = Promise[BrokerConnection]()

    debuggerReady.onComplete {
      case Success(host) =>
        startListening(host, errorCallback)

        try {
          def disconnect(): Unit = {
            host.virtualMachine.inner.dispose()
          }
          // Writing just 'disconnect' results in a compilation warning about deprecated eta-expansion.
          val conn = BrokerConnection(host, disconnect _)
          connectionPromise.success(conn)
        } catch {
          case NonFatal(t) =>
            log.error("Binding failed", t)
            connectionPromise.failure(new RuntimeException("connection failed"))
        }
      case Failure(t) =>
        t match {
          case _: ConnectException =>
            log.error("Failed to connect to the debug target.")
            log.error("Please make sure that the debug target is started with debug VM arguments, for example:")
            log.error(s"  -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=${connectAddr.host}:${connectAddr.port}")
          case _ =>
            log.error("Failed to start the debugger", t)
        }
        // Wrap in RuntimeException if needed, otherwise we'll get UndeclaredThrowableException wrapping the cause.
        val error = if (t.isInstanceOf[RuntimeException]) t else new RuntimeException(t)
        connectionPromise.failure(error)
    }

    connectionPromise.future
  }

  private def startListening(host: NashornScriptHost, errorCallback: Option[Throwable] => Unit) = {
    host.events.subscribe(new Observer[ScriptEvent] {
      override def onNext(item: ScriptEvent): Unit = {}

      override def onError(error: Throwable): Unit = {
        log.error("Unknown error", error)
        errorCallback(Some(error))
      }

      override def onComplete(): Unit = {
        log.info("The debug target disconnected")
        errorCallback(None)
      }
    })
  }
}

class BootContainer(filePublisher: FilePublisher, scriptHost: ScriptHost) extends Container(Seq(filePublisher, scriptHost, new EventEmitHook)) 
Example 194
Source File: Boot.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.boot

import java.util.logging.LogManager

import akka.actor.ActorSystem
import com.programmaticallyspeaking.ncd.config.Conf
import com.programmaticallyspeaking.ncd.infra.BuildProperties
import com.programmaticallyspeaking.ncd.nashorn.AttachingHostProxy
import org.slf4s.Logging

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object Boot extends App with Logging {
  val conf = new Conf(args)

  implicit val system = ActorSystem()
  import scala.concurrent.ExecutionContext.Implicits._

  // Disable java.util logging (used by the Closure Compiler)
  LogManager.getLogManager.reset()

  log.info("NCDbg version: " + BuildProperties.version)
  log.info("NCDbg built with Java version: " + BuildProperties.buildJavaVersion)
  log.info("Local Java version: " + System.getProperty("java.version"))

  val lazyBehavior = conf.isLazy()

  val broker = new Broker(conf)

  val futureHost = if (lazyBehavior) {
    log.info("Starting in lazy mode. Will attach to the debug target upon a DevTools connection.")
    val proxy = new AttachingHostProxy(broker, 10.seconds) //TODO: Configurable time here
    Future.successful(proxy.createHost())
  } else {
    broker.connect({
      case Some(t) => die(2)
      case None => die(0)
    }).map(_.host)
  }

  futureHost.onComplete {
    case Success(host) =>
      val server = new Server(conf)
      try server.start(host) catch {
        case NonFatal(t) =>
          log.error("Failed to start server", t)
          die(3)
      }

    case Failure(t) =>
      // Assume Broker has logged the reason!
      die(3)
  }

  private def die(code: Int): Nothing = {
    system.terminate()
    System.exit(code)
    ???
  }
} 
Example 195
Source File: StreamReadingThread.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn

import java.io.{BufferedReader, IOException, InputStream, InputStreamReader}

import scala.util.control.NonFatal

class StreamReadingThread(in: InputStream, appender: (String) => Unit) extends Thread {
  override def run(): Unit = {
    try {
      val reader = new BufferedReader(new InputStreamReader(in))
      var str = ""
      while (str != null) {
        str = reader.readLine()
        Option(str).foreach(appender)
      }
    } catch {
      case _: InterruptedException =>
        // ok
      case ex: IOException if isStreamClosed(ex) =>
        // ok
      case NonFatal(t) =>
        t.printStackTrace(System.err)
    }
  }

  private def isStreamClosed(ex: IOException) = ex.getMessage.toLowerCase == "stream closed"
} 
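
A hypothetical usage sketch: draining a child process's output on a background thread so the process cannot block on a full pipe.

val process = new ProcessBuilder("java", "-version").start()

// "java -version" writes to stderr, so drain the error stream.
val drainer = new StreamReadingThread(process.getErrorStream, line => println(s"[child] $line"))
drainer.setDaemon(true)
drainer.start()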
Example 196
Source File: ScriptExecutor.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn

import java.io.{BufferedReader, InputStreamReader}

import com.programmaticallyspeaking.ncd.testing.StringUtils
import jdk.nashorn.api.scripting.NashornScriptEngineFactory

import scala.util.control.NonFatal

abstract class ScriptExecutorC extends App with ScriptExecutorBase {
  println("ScriptExecutor starting. Java version: " + System.getProperty("java.version"))
  val scriptEngine = new NashornScriptEngineFactory().getScriptEngine(nashornArgs: _*)
  val reader = new BufferedReader(new InputStreamReader(System.in))
  println(Signals.ready)
  waitForSignal(Signals.go)
  println("Got the go signal!")

  scriptEngine.eval(
    """this.createInstance = function (typeName) {
      |  var Type = Java.type(typeName);
      |  if (!Type) throw new Error("No such type: " + typeName);
      |  return new Type();
      |};
    """.stripMargin)

  while (true) {
    println("Awaiting script on stdin...")
    val script = StringUtils.fromBase64(readStdin())
    println("Got script: " + script)
    try {
      scriptEngine.eval(script)
      println("Script evaluation completed without errors")
    } catch {
      case NonFatal(t) =>
        t.printStackTrace(System.err)
    }
    println(Signals.scriptDone)
  }

  protected def nashornArgs: Array[String]
}

object ScriptExecutorNoJava extends ScriptExecutorC {
  override protected def nashornArgs = Array("--no-syntax-extensions", "--no-java")
}

object ScriptExecutor extends ScriptExecutorC {
  override protected def nashornArgs = Array("--no-syntax-extensions")
} 
Example 197
Source File: RealMarshallerTestFixture.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn

import java.lang.reflect.UndeclaredThrowableException

import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.testing.UnitTest

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Promise}
import scala.util.control.NonFatal

trait RealMarshallerTestFixture extends UnitTest with NashornScriptHostTestFixture {

  override implicit val executionContext: ExecutionContext = ExecutionContext.global

  protected def evaluateExpression(expr: String)(tester: (ScriptHost, ValueNode) => Unit): Unit = {
    val wrapped =
      s"""|(function (result) {
          |debugger;
          |})($expr);
         """.stripMargin
    val resultPromise = Promise[ValueNode]()
    val observer = new Observer[ScriptEvent] {
      override def onNext(item: ScriptEvent): Unit = item match {
        case bp: HitBreakpoint =>
          bp.stackFrames.headOption match {
            case Some(sf) =>
              val maybeResultLocal = sf.scopeChain.find(_.scopeType == ScopeType.Local).flatMap(s => {
                s.value match {
                  case obj: ObjectNode =>
                    try {
                      getHost.getObjectProperties(obj.objectId, true, false).find(_._1 == "result").flatMap(_._2.value)
                    } catch {
                      case NonFatal(t) =>
                        resultPromise.tryFailure(unpack(t))
                        None
                    }

                  case _ => None
                }
              })
              maybeResultLocal match {
                case Some(node) => resultPromise.success(node)
                case None => resultPromise.tryFailure(new Exception("No 'result' local"))
              }
            case None => resultPromise.tryFailure(new Exception("No stack frame"))
          }

        case _ => // ignore
      }

      override def onError(error: Throwable): Unit = resultPromise.tryFailure(error)

      override def onComplete(): Unit = {}
    }
    observeAndRunScriptAsync(wrapped, observer) { host =>
      resultPromise.future.map(node => {
        tester(host, node)
      })
    }
  }
} 
Example 198
Source File: Repl.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.repl

import java.io.BufferedReader

import javax.script.ScriptException

import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

class Repl(reader: BufferedReader) {
  private val engine = new Engine
  private val heldLines = ListBuffer[String]()

  def run(): Unit = {
    loopAround()
  }

  private def loopAround(): Unit = {
    var done = false

    while (!done) {
      showPrompt
      val line = reader.readLine()
      if (line == ":quit") {
        done = true
      } else {
        evaluateLine(line)
      }
    }
  }

  private def evaluateLine(line: String): Unit = {
    val script = (heldLines :+ line).mkString("\n")
    engine.evaluate(script) match {
      case Success(result) =>
        heldLines.clear()
        println(result)

      case Failure(s: ScriptException) if continuationRequired(s) =>
        heldLines += line

      case Failure(NonFatal(t)) =>
        t.printStackTrace(System.out)
        println("")
    }
  }

  private def continuationRequired(s: ScriptException): Boolean = {
    val msg = s.getMessage
    // E.g.: javax.script.ScriptException: <eval>:1:12 Expected } but found eof
    msg.contains("but found eof")
  }

  private def showPrompt = {
    val prompt = if (heldLines.isEmpty) "> " else "| "
    print(prompt)
    System.out.flush()
  }
} 
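
A minimal entry point wiring the Repl to stdin (ReplMain is an invented name):

import java.io.{BufferedReader, InputStreamReader}

object ReplMain extends App {
  new Repl(new BufferedReader(new InputStreamReader(System.in))).run()
}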
Example 199
Source File: CassandraHealthCheck.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.healthcheck

import akka.actor.ActorSystem
import akka.event.Logging
import akka.pattern.{ ask, AskTimeoutException }
import akka.persistence.Persistence
import akka.persistence.cassandra.PluginSettings
import akka.persistence.cassandra.journal.CassandraJournal.HealthCheckQuery
import akka.util.Timeout

import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.control.NonFatal

final class CassandraHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {

  private val log = Logging.getLogger(system, getClass)

  private val settings = new PluginSettings(system, system.settings.config.getConfig("akka.persistence.cassandra"))
  private val healthCheckSettings = settings.healthCheckSettings
  private val journalPluginId = s"${healthCheckSettings.pluginLocation}.journal"
  private val journalRef = Persistence(system).journalFor(journalPluginId)

  private implicit val ec: ExecutionContextExecutor = system.dispatchers.lookup(s"$journalPluginId.plugin-dispatcher")
  private implicit val timeout: Timeout = healthCheckSettings.timeout

  override def apply(): Future[Boolean] = {
    (journalRef ? HealthCheckQuery).map(_ => true).recoverWith {
      case _: AskTimeoutException =>
        log.warning("Failed to execute health check due to ask timeout")
        Future.successful(false)
      case NonFatal(e) =>
        log.warning("Failed to execute health check due to: {}", e)
        Future.successful(false)
    }
  }
} 
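
A usage sketch: the class is a () => Future[Boolean], so once constructed it can simply be applied (the ActorSystem here is invented for illustration):

val system = ActorSystem("health-check-demo")
import system.dispatcher

val healthCheck = new CassandraHealthCheck(system)
healthCheck().foreach(ok => println(s"cassandra healthy: $ok"))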
Example 200
Source File: TypeMap.scala    From scala-db-codegen   with Apache License 2.0 5 votes vote down vote up
package com.geirsson.codegen

import scala.util.control.NonFatal

import caseapp.core.ArgParser

object TypeMap {
  implicit val parser: ArgParser[TypeMap] =
    ArgParser.instance[TypeMap] { s =>
      try {
        val pairs = s.split(";").map { pair =>
          val from :: to :: Nil = pair.split(",", 2).toList
          from -> to
        }
        Right(TypeMap(pairs: _*))
      } catch {
        case NonFatal(e) =>
          Left(s"invalid typeMap $s. Expected format from1,to1;from2,to2")
      }
    }
  val default = TypeMap(
    "text" -> "String",
    "float8" -> "Double",
    "numeric" -> "BigDecimal",
    "int4" -> "Int",
    "int8" -> "Long",
    "bool" -> "Boolean",
    "varchar" -> "String",
    "serial" -> "Int",
    "bigserial" -> "Long",
    "timestamp" -> "java.util.Date",
    "bytea" -> "Array[Byte]", // PostgreSQL
    "uuid" -> "java.util.UUID", // H2, PostgreSQL
    "json" -> "String" // PostgreSQL
  )
}

case class TypeMap(pairs: (String, String)*)
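
An illustration of the expected argument format (the flag name is hypothetical):

//   --type-map "int4,Int;text,String"
// parses to:
val custom = TypeMap("int4" -> "Int", "text" -> "String")
// A pair without a comma hits the `from :: to :: Nil` match, throws a
// MatchError, is caught by NonFatal, and yields the Left(...) error message.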