scala.util.Failure Scala Examples

The following examples show how to use scala.util.Failure. They are taken from open-source projects; you can go to the original project or source file by following the links above each example.
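All of the examples below build on the same basic pattern: wrap a computation that may throw in scala.util.Try and pattern match on Success or Failure. Here is a minimal, self-contained sketch of that pattern (the parsePort name and the -1 fallback are illustrative, not from any project below):

import scala.util.{Failure, Success, Try}

def parsePort(s: String): Int =
  Try(s.toInt) match {
    case Success(n) => n
    case Failure(e) =>
      println(s"Invalid port '$s': ${e.getMessage}")
      -1 // fall back to a sentinel value on parse failure
  }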
Example 1
Source File: DateTimeTools.scala    From pertax-frontend   with Apache License 2.0
package util

import com.google.inject.{Inject, Singleton}
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
import org.joda.time.{DateTime, _}
import play.api.Logger
import uk.gov.hmrc.time.CurrentTaxYear

import scala.util.{Failure, Success, Try}

import java.time.{LocalDateTime => JavaLDT}

object DateTimeTools extends CurrentTaxYear {

  // Timezone causing problems on the dev server
  val defaultTZ = DateTimeZone.forID("Europe/London")
  val unixDateFormat = "yyyy-MM-dd"
  val unixDateTimeFormat = "yyyy-MM-dd'T'HH:mm:ss"
  val humanDateFormat = "dd MMMM yyyy"

  // Returns, for example, "1516" in March 2016
  def previousAndCurrentTaxYear = previousAndCurrentTaxYearFromGivenYear(current.currentYear)

  def previousAndCurrentTaxYearFromGivenYear(year: Int) = {
    def y = year

    (y - 1).toString.takeRight(2) + (y).toString.takeRight(2)
  }

  private def formatter(pattern: String): DateTimeFormatter = DateTimeFormat.forPattern(pattern).withZone(defaultTZ)

  def short(dateTime: LocalDate) = formatter("dd/MM/yyyy").print(dateTime)

  def asHumanDateFromUnixDate(unixDate: String): String =
    Try(DateTimeFormat.forPattern(humanDateFormat).print(DateTime.parse(unixDate))) match {
      case Success(v) => v
      case Failure(e) => {
        Logger.warn("Invalid date parse in DateTimeTools.asHumanDateFromUnixDate: " + e)
        unixDate
      }
    }

  def toPaymentDate(dateTime: JavaLDT): LocalDate =
    new LocalDate(dateTime.getYear, dateTime.getMonthValue, dateTime.getDayOfMonth)

  override def now: () => DateTime = DateTime.now
}

@Singleton
class DateTimeTools @Inject()() {

  def showSendTaxReturnByPost = {

    val start = new DateTime(s"${DateTime.now().getYear}-11-01T00:00:00Z")
    val end = new DateTime(s"${DateTime.now().getYear + 1}-01-31T23:59:59Z")
    !DateTime.now().isAfter(start) && DateTime.now().isBefore(end)
  }
} 
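A hypothetical call site for the helpers above (the date strings are examples): asHumanDateFromUnixDate returns the formatted date on Success, and on Failure logs a warning and falls back to the raw input.

DateTimeTools.asHumanDateFromUnixDate("2016-03-01") // "01 March 2016"
DateTimeTools.asHumanDateFromUnixDate("not-a-date") // logs a warning, returns "not-a-date"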
Example 2
Source File: AbstractWebServer.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.common

import akka.Done
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.{Directives, Route}
import akka.http.scaladsl.settings.ServerSettings
import oharastream.ohara.common.util.Releasable

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.io.StdIn
import scala.util.{Failure, Success}


private[shabondi] abstract class AbstractWebServer extends Directives with Releasable {
  implicit protected val actorSystem: ActorSystem = ActorSystem(Logging.simpleName(this).replaceAll("\\$", ""))

  protected def routes: Route

  protected def postBinding(binding: ServerBinding): Unit = {
    val hostname = binding.localAddress.getHostName
    val port     = binding.localAddress.getPort
    actorSystem.log.info(s"Server online at http://$hostname:$port/")
  }

  protected def postBindingFailure(cause: Throwable): Unit = {
    actorSystem.log.error(cause, s"Error starting the server ${cause.getMessage}")
  }

  protected def waitForShutdownSignal()(implicit ec: ExecutionContext): Future[Done] = {
    val promise = Promise[Done]()
    sys.addShutdownHook {
      promise.trySuccess(Done)
    }
    Future {
      blocking {
        if (StdIn.readLine("Press <RETURN> to stop Shabondi WebServer...\n") != null)
          promise.trySuccess(Done)
      }
    }
    promise.future
  }

  protected def postServerShutdown(): Unit = actorSystem.log.info("Shutting down the server")

  def start(bindInterface: String, port: Int): Unit = {
    start(bindInterface, port, ServerSettings(actorSystem))
  }

  def start(bindInterface: String, port: Int, settings: ServerSettings): Unit = {
    implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher

    val bindingFuture: Future[Http.ServerBinding] = Http().bindAndHandle(
      handler = routes,
      interface = bindInterface,
      port = port,
      settings = settings
    )

    bindingFuture.onComplete {
      case Success(binding) =>
        postBinding(binding)
      case Failure(cause) =>
        postBindingFailure(cause)
    }

    Await.ready(
      bindingFuture.flatMap(_ => waitForShutdownSignal()),
      Duration.Inf
    )

    bindingFuture
      .flatMap(_.unbind())
      .onComplete { _ =>
        postServerShutdown()
        actorSystem.terminate()
      }
  }

  override def close(): Unit = actorSystem.terminate()
} 
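The Success/Failure cases on bindingFuture work because Future#onComplete hands its callback a scala.util.Try. A standalone sketch of that relationship (values are illustrative):

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

Future(42 / 0).onComplete {
  case Success(n) => println(s"result: $n") // not reached for this input
  case Failure(e) => println(s"failed: $e") // ArithmeticException: / by zero
}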
Example 3
Source File: ComponentsFixture.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.test

import java.util.concurrent.atomic.AtomicReference

import com.daml.navigator.test.config.Arguments
import com.daml.navigator.test.runner.{HeadNavigator, PackagedDamlc, PackagedSandbox}
import com.typesafe.scalalogging.LazyLogging

import scala.io.Source
import scala.util.{Failure, Success, Try}

class ComponentsFixture(
    val args: Arguments,
    val navigatorPort: Int,
    val sandboxPort: Int,
    val scenario: String
) extends LazyLogging {

  // Cleanup actions that destroy the processes started by this fixture
  private val killProcs: AtomicReference[List[Unit => Unit]] = new AtomicReference(List.empty)

  private val onlineUrl = s"http://localhost:$navigatorPort/api/about"

  private def get(
      url: String,
      connectTimeout: Int = 1000,
      readTimeout: Int = 1000,
      requestMethod: String = "GET"
  ): String = {
    import java.net.{URL, HttpURLConnection}
    val connection = (new URL(url)).openConnection.asInstanceOf[HttpURLConnection]
    connection.setConnectTimeout(connectTimeout)
    connection.setReadTimeout(readTimeout)
    connection.setRequestMethod(requestMethod)
    val inputStream = connection.getInputStream
    val content = Source.fromInputStream(inputStream).mkString
    if (inputStream != null) inputStream.close()
    content
  }

  def startup(): Try[Unit] = {
    if (args.startComponents) {
      logger.info("Starting the sandbox and the Navigator")
      for {
        (darFile, tempFiles) <- Try(PackagedDamlc.run(args.damlPath))
        sandbox <- Try(PackagedSandbox.runAsync(sandboxPort, darFile, scenario))
        _ = killProcs.updateAndGet(s => sandbox :: s)
        navigator <- Try(
          HeadNavigator.runAsync(args.navConfPAth, args.navigatorDir, navigatorPort, sandboxPort))
        _ = killProcs.updateAndGet(s => navigator :: s)
      } yield { () }
    } else {
      Success(())
    }
  }

  private def retry[R](action: => R, maxRetries: Int, delayMillis: Int): Try[R] = {
    def retry0(count: Int): Try[R] = {
      Try(action) match {
        case Success(r) => Success(r)
        case Failure(e) =>
          if (count > maxRetries) {
            logger.error(
              s"Navigator is not available after $maxRetries retries with $delayMillis millis interval.")
            Failure(e)
          } else {
            logger.info(s"Navigator is not available yet, waiting $delayMillis millis ")
            Thread.sleep(delayMillis.toLong)
            retry0(count + 1)
          }
      }
    }

    retry0(0)
  }

  def waitForNavigator(): Try[Unit] = {
    logger.info(s"Waiting for the Navigator to start up (waiting for $onlineUrl)")
    retry({ get(onlineUrl); () }, 120, 1000)
  }

  def shutdown(): Unit = {
    killProcs.getAndUpdate(procs => {
      procs.foreach(killAction => Try { killAction(()) })
      List.empty
    })
    ()
  }
} 
Example 4
Source File: AuthorizationInterceptor.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.auth.interceptor

import com.daml.ledger.api.auth.{AuthService, Claims}
import com.daml.platform.server.api.validation.ErrorFactories.unauthenticated
import io.grpc.{
  Context,
  Contexts,
  Metadata,
  ServerCall,
  ServerCallHandler,
  ServerInterceptor,
  Status
}
import org.slf4j.{Logger, LoggerFactory}

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}


final class AuthorizationInterceptor(protected val authService: AuthService, ec: ExecutionContext)
    extends ServerInterceptor {

  private val logger: Logger = LoggerFactory.getLogger(AuthorizationInterceptor.getClass)
  private val internalAuthenticationError =
    Status.INTERNAL.withDescription("Failed to get claims from request metadata")

  import AuthorizationInterceptor.contextKeyClaim

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      nextListener: ServerCallHandler[ReqT, RespT]): ServerCall.Listener[ReqT] = {
    // Note: Context uses ThreadLocal storage, so we need to capture it outside of the async block below.
    // Contexts are immutable and safe to pass around.
    val prevCtx = Context.current

    // The method interceptCall() must return a Listener.
    // The target listener is created by calling `Contexts.interceptCall()`.
    // However, this is only done after we have asynchronously received the claims.
    // Therefore, we need to return a listener that buffers all messages until the target listener is available.
    new AsyncForwardingListener[ReqT] {
      FutureConverters
        .toScala(authService.decodeMetadata(headers))
        .onComplete {
          case Failure(exception) =>
            logger.warn(s"Failed to get claims from request metadata: ${exception.getMessage}")
            call.close(internalAuthenticationError, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(Claims.empty) =>
            logger.debug(s"Auth metadata decoded into empty claims, returning UNAUTHENTICATED")
            call.close(Status.UNAUTHENTICATED, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(claims) =>
            val nextCtx = prevCtx.withValue(contextKeyClaim, claims)
            // Contexts.interceptCall() creates a listener that wraps all methods of `nextListener`
            // such that `Context.current` returns `nextCtx`.
            val nextListenerWithContext =
              Contexts.interceptCall(nextCtx, call, headers, nextListener)
            setNextListener(nextListenerWithContext)
            nextListenerWithContext
        }(ec)
    }
  }
}

object AuthorizationInterceptor {

  private val contextKeyClaim = Context.key[Claims]("AuthServiceDecodedClaim")

  def extractClaimsFromContext(): Try[Claims] =
    Option(contextKeyClaim.get()).fold[Try[Claims]](Failure(unauthenticated()))(Success(_))

  def apply(authService: AuthService, ec: ExecutionContext): AuthorizationInterceptor =
    new AuthorizationInterceptor(authService, ec)

} 
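extractClaimsFromContext uses Option.fold to turn an absent value into a Failure. The same idiom in isolation, as a minimal sketch (required is a hypothetical helper name, not part of the daml sources):

import scala.util.{Failure, Success, Try}

// Convert an Option to a Try, supplying the error for the empty case.
def required[A](oa: Option[A], ifMissing: => Throwable): Try[A] =
  oa.fold[Try[A]](Failure(ifMissing))(Success(_))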
Example 5
Source File: InfiniteRetries.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

trait InfiniteRetries {

  protected def retry[T](action: => Future[T], delay: FiniteDuration = 10.millis)(
      implicit system: ActorSystem): Future[T] = {
    implicit val ec: ExecutionContext = system.dispatcher
    action.transformWith {
      case Success(v) =>
        Future.successful(v)
      case Failure(t) =>
        val p = Promise[T]()
        system.scheduler.scheduleOnce(
          delay,
          () =>
            retry[T](action, delay).onComplete {
              case Success(s) => p.success(s)
              case Failure(throwable) => p.failure(throwable)
          }
        )
        p.future
    }
  }
} 
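A hypothetical caller of the retry combinator above; SandboxProbe and ping are illustrative names, not part of the daml sources:

import akka.actor.ActorSystem
import scala.concurrent.Future

class SandboxProbe extends InfiniteRetries {
  // Keep re-running a flaky health check until it eventually succeeds.
  def awaitReady(ping: () => Future[Unit])(implicit system: ActorSystem): Future[Unit] =
    retry(ping())
}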
Example 6
Source File: ApiOffset.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform

import com.daml.ledger.participant.state.v1.Offset
import com.daml.lf.data.Ref

import scala.util.{Failure, Success, Try}

// This utility object is used as a single point to encode and decode
// offsets sent over the API and received from the API.
object ApiOffset {

  def fromString(s: String): Try[Offset] =
    Ref.HexString
      .fromString(s)
      .fold(
        err => Failure(new IllegalArgumentException(err)),
        b => Success(Offset.fromHexString(b))
      )

  def assertFromString(s: String): Offset = fromString(s).get

  def toApiString(offset: Offset): Ref.LedgerString =
    offset.toHexString

  implicit class ApiOffsetConverter(val offset: Offset) {
    def toApiString: Ref.LedgerString = ApiOffset.toApiString(offset)
  }

} 
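A usage sketch of fromString, matching on the returned Try ("00ff" stands in for a real hex-encoded offset received over the API):

ApiOffset.fromString("00ff") match {
  case Success(offset) => println(s"parsed offset: ${offset.toApiString}")
  case Failure(error)  => println(s"rejected offset: ${error.getMessage}")
}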
Example 7
Source File: TrackerMap.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


// Note: this listing is an excerpt; class TrackerMap itself is omitted, and
// the enclosing companion object plus the lifecycle states referenced below
// are reconstructed here from the comments and pattern matches so that the
// excerpt reads on its own.
object TrackerMap {
  sealed trait AsyncResourceState[+T <: AutoCloseable]
  final case object Waiting extends AsyncResourceState[Nothing]
  final case object Closed extends AsyncResourceState[Nothing]
  final case class Ready[T <: AutoCloseable](t: T) extends AsyncResourceState[T]

  final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
    private val logger = LoggerFactory.getLogger(this.getClass)

    // Must progress Waiting => Ready => Closed or Waiting => Closed.
    val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

    future.andThen({
      case Success(t) =>
        if (!state.compareAndSet(Waiting, Ready(t))) {
          // This is the punch line of AsyncResource.
          // If we've been closed in the meantime, we must close the underlying resource also.
          // This "on-failure-to-complete" behavior is not present in scala or java Futures.
          t.close()
        }
      // Someone should be listening to this failure downstream
      // TODO(mthvedt): Refactor so at least one downstream listener is always present,
      // and exceptions are never dropped.
      case Failure(ex) =>
        logger.error("failure to get async resource", ex)
        state.set(Closed)
    })(DirectExecutionContext)

    def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
      state.get() match {
        case Waiting => future.flatMap(f)
        case Closed => throw new IllegalStateException()
        case Ready(t) => f(t)
      }
    }

    def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
      flatMap(t => Future.successful(f(t)))

    def ifPresent[U](f: T => U): Option[U] = state.get() match {
      case Ready(t) => Some(f(t))
      case _ => None
    }

    def close(): Unit = state.getAndSet(Closed) match {
      case Ready(t) => t.close()
      case _ =>
    }
  }

  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
} 
Example 8
Source File: HandleOfferResult.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.stream.QueueOfferResult
import com.daml.platform.server.api.ApiException
import com.google.rpc.status.Status
import io.grpc.{Status => GrpcStatus}

import scala.concurrent.Promise
import scala.util.{Failure, Success, Try}

private[tracking] object HandleOfferResult {
  val toGrpcStatus: PartialFunction[Try[QueueOfferResult], Option[GrpcStatus]] = {
    case Failure(t) =>
      t match {
        case i: IllegalStateException
            if i.getMessage == "You have to wait for previous offer to be resolved to send another request" =>
          Some(
            GrpcStatus.RESOURCE_EXHAUSTED
              .withDescription("Ingress buffer is full"))
        case _ =>
          Some(
            GrpcStatus.ABORTED
              .withDescription(s"Failure: ${t.getClass.getSimpleName}: ${t.getMessage}")
              .withCause(t))

      }
    case Success(QueueOfferResult.Failure(t)) =>
      Some(
        GrpcStatus.ABORTED
          .withDescription(s"Failed to enqueue: ${t.getClass.getSimpleName}: ${t.getMessage}")
          .withCause(t))
    case Success(QueueOfferResult.Dropped) =>
      Some(
        GrpcStatus.RESOURCE_EXHAUSTED
          .withDescription("Ingress buffer is full"))
    case Success(QueueOfferResult.QueueClosed) =>
      Some(GrpcStatus.ABORTED.withDescription("Queue closed"))
    case Success(QueueOfferResult.Enqueued) => None // Promise will be completed downstream.
  }

  def toStatusMessage: PartialFunction[Try[QueueOfferResult], Status] =
    toGrpcStatus.andThen(_.fold(Status())(e => Status(e.getCode.value(), e.getDescription)))

  def completePromise(promise: Promise[_]): PartialFunction[Try[QueueOfferResult], Unit] =
    toGrpcStatus.andThen(_.foreach(s => promise.tryFailure(new ApiException(s))))
} 
Example 9
Source File: GrpcHealthService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.health.HealthChecks
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.DropRepeated
import com.daml.platform.server.api.services.grpc.GrpcHealthService._
import io.grpc.health.v1.health.{
  HealthAkkaGrpc,
  HealthCheckRequest,
  HealthCheckResponse,
  HealthGrpc
}
import io.grpc.{ServerServiceDefinition, Status, StatusException}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

class GrpcHealthService(
    healthChecks: HealthChecks,
    maximumWatchFrequency: FiniteDuration = 1.second,
)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    executionContext: ExecutionContext,
) extends HealthAkkaGrpc
    with GrpcApiService {
  override def bindService(): ServerServiceDefinition =
    HealthGrpc.bindService(this, DirectExecutionContext)

  override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
    Future.fromTry(matchResponse(serviceFrom(request)))

  override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] =
    Source
      .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get))
      .throttle(1, per = maximumWatchFrequency)
      .via(DropRepeated())

  private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] =
    if (!componentName.forall(healthChecks.hasComponent))
      Failure(new StatusException(Status.NOT_FOUND))
    else if (healthChecks.isHealthy(componentName))
      Success(servingResponse)
    else
      Success(notServingResponse)
}

object GrpcHealthService {
  private[grpc] val servingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING)

  private[grpc] val notServingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING)

  private def serviceFrom(request: HealthCheckRequest): Option[String] = {
    Option(request.service).filter(_.nonEmpty)
  }
} 
Example 10
Source File: BatchedValidatingCommitter.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.validator

import java.time.Instant

import akka.stream.Materializer
import com.daml.caching.Cache
import com.daml.ledger.participant.state.kvutils.Bytes
import com.daml.ledger.participant.state.kvutils.DamlKvutils.{DamlStateKey, DamlStateValue}
import com.daml.ledger.participant.state.v1.{ParticipantId, SubmissionResult}
import com.daml.ledger.validator.batch.{
  BatchedSubmissionValidator,
  BatchedSubmissionValidatorFactory
}
import com.daml.ledger.validator.caching.{CacheUpdatePolicy, ImmutablesOnlyCacheUpdatePolicy}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


class BatchedValidatingCommitter[LogResult](
    now: () => Instant,
    keySerializationStrategy: StateKeySerializationStrategy,
    validator: BatchedSubmissionValidator[LogResult],
    stateValueCache: Cache[DamlStateKey, DamlStateValue],
    cacheUpdatePolicy: CacheUpdatePolicy
)(implicit materializer: Materializer) {
  def commit(
      correlationId: String,
      envelope: Bytes,
      submittingParticipantId: ParticipantId,
      ledgerStateOperations: LedgerStateOperations[LogResult]
  )(implicit executionContext: ExecutionContext): Future[SubmissionResult] = {
    val (ledgerStateReader, commitStrategy) = readerAndCommitStrategyFrom(ledgerStateOperations)
    validator
      .validateAndCommit(
        envelope,
        correlationId,
        now(),
        submittingParticipantId,
        ledgerStateReader,
        commitStrategy
      )
      .transformWith {
        case Success(_) =>
          Future.successful(SubmissionResult.Acknowledged)
        case Failure(exception) =>
          Future.successful(SubmissionResult.InternalError(exception.getLocalizedMessage))
      }
  }

  private def readerAndCommitStrategyFrom(ledgerStateOperations: LedgerStateOperations[LogResult])(
      implicit executionContext: ExecutionContext)
    : (DamlLedgerStateReader, CommitStrategy[LogResult]) =
    if (stateValueCache == Cache.none) {
      BatchedSubmissionValidatorFactory
        .readerAndCommitStrategyFrom(ledgerStateOperations, keySerializationStrategy)
    } else {
      BatchedSubmissionValidatorFactory
        .cachingReaderAndCommitStrategyFrom(
          ledgerStateOperations,
          stateValueCache,
          cacheUpdatePolicy,
          keySerializationStrategy)
    }
}

object BatchedValidatingCommitter {
  def apply[LogResult](now: () => Instant, validator: BatchedSubmissionValidator[LogResult])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      Cache.none,
      ImmutablesOnlyCacheUpdatePolicy)

  def apply[LogResult](
      now: () => Instant,
      validator: BatchedSubmissionValidator[LogResult],
      stateValueCache: Cache[DamlStateKey, DamlStateValue])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      stateValueCache,
      ImmutablesOnlyCacheUpdatePolicy)
} 
Example 11
Source File: Main.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.io.File

import scala.util.{Failure, Success}

object Main {

  object ErrorCodes {
    val InvalidUsage = 100
    val GenerateKeysError = 101
    val GenerateJwtError = 102
  }

  final case class Config(
      generateKeys: Option[GenerateKeys] = None,
      generateJwt: Option[GenerateJwt] = None)

  final case class GenerateKeys(name: Option[String] = None)
  final case class GenerateJwt(publicKey: Option[File] = None, privateKey: Option[File] = None)

  def main(args: Array[String]): Unit = {
    parseConfig(args) match {
      case Some(Config(Some(GenerateKeys(Some(name))), None)) =>
        RsaKeysGenerator.generate(keyPair(name)) match {
          case Success(a) =>
            print(s"Generated keys: ${a: domain.KeyPair[File]}")
          case Failure(e) =>
            e.printStackTrace()
            sys.exit(ErrorCodes.GenerateKeysError)
        }
      case Some(Config(None, Some(GenerateJwt(Some(publicKey), Some(privateKey))))) =>
        JwtGenerator.generate(
          domain.KeyPair(publicKey = Seq.empty[Byte], privateKey = Seq.empty[Byte])) match {
          case Success(a) =>
            println(s"Generated JWT: $a")
          case Failure(e) =>
            e.printStackTrace()
            sys.exit(ErrorCodes.GenerateJwtError)
        }
      case Some(_) =>
        configParser.showUsage()
        sys.exit(ErrorCodes.InvalidUsage)
      case None =>
        // error is printed out by scopt... yeah I know... why?
        sys.exit(ErrorCodes.InvalidUsage)
    }
  }

  private def keyPair(name: String) =
    domain.KeyPair(
      publicKey = new File(s"./$name.pub").getAbsoluteFile,
      privateKey = new File(s"./$name.pvt").getAbsoluteFile)

  private def parseConfig(args: Seq[String]): Option[Config] = {
    configParser.parse(args, Config())
  }

  private val configParser = new scopt.OptionParser[Config]("ledger-service-jwt") {
    cmd("generate-keys")
      .text("generate public and private keys")
      .action((_, c) => c.copy(generateKeys = Some(GenerateKeys())))
      .children(
        opt[String]("name")
          .required()
          .valueName("<keys name>")
          .action((x, c) => c.copy(generateKeys = c.generateKeys.map(_.copy(name = Some(x)))))
      )

    cmd("generate-jwt")
      .text("generate JWT")
      .action((_, c) => c.copy(generateJwt = Some(GenerateJwt())))
      .children(
        opt[File]("public-key")
          .required()
          .valueName("<public key file path>")
          .action((x, c) => c.copy(generateJwt = c.generateJwt.map(_.copy(publicKey = Some(x))))),
        opt[File]("private-key")
          .required()
          .valueName("<private key file path>")
          .action((x, c) => c.copy(generateJwt = c.generateJwt.map(_.copy(privateKey = Some(x)))))
      )
  }
} 
Example 12
Source File: RsaKeysGenerator.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.io.{File, FileNotFoundException, FileOutputStream}

import com.daml.lf.data.TryOps.Bracket.bracket
import scalaz.std.option._
import scalaz.syntax.applicative._

import scala.util.{Failure, Success, Try}

object RsaKeysGenerator {

  private val keySize: Int = 2048

  def generate(destination: domain.KeyPair[File]): Try[domain.KeyPair[File]] =
    for {
      keyPair <- generate_(): Try[domain.KeyPair[Array[Byte]]]
      publicKeyFile <- writeKey(keyPair.publicKey, destination.publicKey)
      privateKeyFile <- writeKey(keyPair.privateKey, destination.privateKey)
    } yield domain.KeyPair(publicKey = publicKeyFile, privateKey = privateKeyFile)

  def generate(): Try[domain.KeyPair[Seq[Byte]]] =
    generate_().map(k => k.map(as => as.toSeq))

  private def generate_(): Try[domain.KeyPair[Array[Byte]]] =
    Try {
      val kpg = java.security.KeyPairGenerator.getInstance("RSA")
      kpg.initialize(keySize)
      Option(kpg.generateKeyPair()).flatMap(domainKeyPair)
    } flatMap {
      case Some(x) => Success(x)
      case None => Failure(new IllegalStateException("Cannot generate RSA key pair, null returned"))
    }

  private def domainKeyPair(k: java.security.KeyPair): Option[domain.KeyPair[Array[Byte]]] =
    ^(Option(k.getPublic), Option(k.getPrivate)) { (pub, pvt) =>
      domain.KeyPair(publicKey = pub.getEncoded, privateKey = pvt.getEncoded)
    }

  private def writeKey(key: Array[Byte], file: File): Try[File] =
    bracket(Try(new FileOutputStream(file)))(close).flatMap { ostream =>
      for {
        encoder <- Try(java.util.Base64.getEncoder)
        _ <- Try(ostream.write(encoder.encode(key)))
        _ <- exists(file)
      } yield file
    }

  private def close(a: FileOutputStream): Try[Unit] = Try(a.close())

  private def exists(f: File): Try[File] =
    for {
      b <- Try(f.exists())
      x <- if (b) Success(f) else Failure(new FileNotFoundException(f.getAbsolutePath))
    } yield x
} 
Example 13
Source File: DarManifestReader.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf
package archive

import java.io.InputStream
import java.util.jar.{Attributes, Manifest}

import scala.util.{Failure, Success, Try}

object DarManifestReader {

  private val supportedFormat = "daml-lf"

  def dalfNames(is: InputStream): Try[Dar[String]] = {
    val manifest = new Manifest(is)
    val attributes = value(manifest.getMainAttributes) _
    for {
      mainDalf <- attributes("Main-Dalf")
      allDalfs <- attributes("Dalfs")
      format <- attributes("Format")
      _ <- checkFormat(format)
    } yield Dar(mainDalf, dependencies(allDalfs, mainDalf))
  }

  private def dependencies(other: String, main: String): List[String] = {
    val deps = other.split(',').view.map(_.trim)
    deps.filter(x => x != main).toList
  }

  private def value(attributes: Attributes)(key: String): Try[String] =
    Option(attributes.getValue(key)) match {
      case None => failure(s"Cannot find attribute: $key")
      case Some(x) => Success(x.trim)
    }

  private def checkFormat(format: String): Try[Unit] =
    if (format == supportedFormat) Success(())
    else failure(s"Unsupported format: $format")

  private def failure(msg: String) = Failure(DarManifestReaderException(msg))

  case class DarManifestReaderException(msg: String) extends IllegalStateException(msg)
} 
Example 14
Source File: UniversalArchiveReader.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf
package archive

import java.io._
import java.util.zip.ZipInputStream

import com.daml.lf.data.Ref
import com.daml.lf.language.LanguageMajorVersion
import com.daml.daml_lf_dev.DamlLf

import scala.util.{Failure, Success, Try}

import com.daml.lf.data.TryOps.Bracket.bracket


// Note: excerpt; the UniversalArchiveReader class and its companion's
// apply factory are omitted from this listing.
object UniversalArchiveReaderWithVersion {
  def apply()
    : UniversalArchiveReader[((Ref.PackageId, DamlLf.ArchivePayload), LanguageMajorVersion)] =
    UniversalArchiveReader(parseDalf)

  private def parseDalf(is: InputStream) = Try(Reader.readArchiveAndVersion(is))
}

object SupportedFileType {
  def supportedFileType(f: File): Try[SupportedFileType] =
    if (DarFile.matchesFileExtension(f)) Success(DarFile)
    else if (DalfFile.matchesFileExtension(f)) Success(DalfFile)
    else Failure(UnsupportedFileExtension(f))

  sealed abstract class SupportedFileType(fileExtension: String) extends Serializable with Product {
    def matchesFileExtension(f: File): Boolean = f.getName.endsWith(fileExtension)
  }
  final case object DarFile extends SupportedFileType(".dar")
  final case object DalfFile extends SupportedFileType(".dalf")

  case class UnsupportedFileExtension(file: File)
      extends RuntimeException(s"Unsupported file extension: ${file.getAbsolutePath}")
} 
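Illustrative calls to supportedFileType (file names are examples): the Try encodes the accept/reject decision without throwing.

import java.io.File

SupportedFileType.supportedFileType(new File("model.dar"))  // Success(DarFile)
SupportedFileType.supportedFileType(new File("model.dalf")) // Success(DalfFile)
SupportedFileType.supportedFileType(new File("model.zip"))  // Failure(UnsupportedFileExtension(...))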
Example 15
Source File: DarManifestReaderTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.archive

import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.Charset

import com.daml.lf.archive.DarManifestReader.DarManifestReaderException
import org.scalatest.{Inside, Matchers, WordSpec}

import scala.util.{Failure, Success}

class DarManifestReaderTest extends WordSpec with Matchers with Inside {

  private val unicode = Charset.forName("UTF-8")

  "should read dalf names from manifest, real scenario with Dalfs line split" in {
    val manifest = """Manifest-Version: 1.0
      |Created-By: Digital Asset packager (DAML-GHC)
      |Main-Dalf: com.daml.lf.archive:DarReaderTest:0.1.dalf
      |Dalfs: com.daml.lf.archive:DarReaderTest:0.1.dalf, daml-pri
      | m.dalf
      |Format: daml-lf
      |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(
      Dar("com.daml.lf.archive:DarReaderTest:0.1.dalf", List("daml-prim.dalf")))

    inputStream.close()
  }

  "should read dalf names from manifest, Main-Dalf returned in the head" in {
    val manifest = """Main-Dalf: A.dalf
                     |Dalfs: B.dalf, C.dalf, A.dalf, E.dalf
                     |Format: daml-lf
                     |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(Dar("A.dalf", List("B.dalf", "C.dalf", "E.dalf")))

    inputStream.close()
  }

  "should read dalf names from manifest, can handle one Dalf per manifest" in {
    val manifest = """Main-Dalf: A.dalf
                     |Dalfs: A.dalf
                     |Format: daml-lf
                     |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(Dar("A.dalf", List.empty))

    inputStream.close()
  }

  "should return failure if Format is not daml-lf" in {
    val manifest = """Main-Dalf: A.dalf
                     |Dalfs: B.dalf, C.dalf, A.dalf, E.dalf
                     |Format: anything-different-from-daml-lf
                     |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    inside(actual) {
      case Failure(DarManifestReaderException(msg)) =>
        msg shouldBe "Unsupported format: anything-different-from-daml-lf"
    }

    inputStream.close()
  }
} 
Example 16
Source File: TryOps.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data

import scala.util.{Failure, Success, Try}

private[daml] object TryOps {
  def sequence[A](list: List[Try[A]]): Try[List[A]] = {
    val zero: Try[List[A]] = Success(List.empty[A])
    list.foldRight(zero)((a, as) => map2(a, as)(_ :: _))
  }

  def map2[A, B, C](ta: Try[A], tb: Try[B])(f: (A, B) => C): Try[C] =
    for {
      a <- ta
      b <- tb
    } yield f(a, b)

  object Bracket {

    
    def bracket[A, B](fa: Try[A])(cleanup: A => Try[B]): Bracket[A, B] = new Bracket(fa, cleanup)

    final class Bracket[A, B](fa: Try[A], cleanup: A => Try[B]) {
      def flatMap[C](f: A => Try[C]): Try[C] = {
        val fc = fa.flatMap(a => f(a))
        val fb = fa.flatMap(a => cleanup(a))
        (fc, fb) match {
          case (Success(_), Success(_)) => fc
          case (e @ Failure(_), _) => e
          case (Success(_), Failure(e)) => Failure(e)
        }
      }

      def map[C](f: A => C): Try[C] = flatMap(a => Try(f(a)))
    }
  }
} 
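How sequence behaves, sketched with illustrative inputs (a real caller must live inside the daml package, since TryOps is private[daml]): all successes collect into one Success, and the leftmost Failure short-circuits the result.

import scala.util.Try

TryOps.sequence(List(Try(1), Try(2)))         // Success(List(1, 2))
TryOps.sequence(List(Try(1), Try("x".toInt))) // Failure(NumberFormatException)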
Example 17
Source File: TryOpsTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data

import org.scalatest.{Matchers, WordSpec}
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import com.daml.lf.data.TryOps.Bracket.bracket

import scala.util.{Failure, Success, Try}

class TryOpsTest extends WordSpec with Matchers with GeneratorDrivenPropertyChecks {

  "bracket should call clean after successful calculation" in forAll { (a: Int, b: Int) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x" :: calls
      Success(())
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Success(x + y)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))
    actual shouldBe Success(a + b)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a")
  }

  "bracket should fail if clean failed" in forAll { (a: Int, b: Int, e: Throwable) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x $e" :: calls
      Failure(e)
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Success(x + y)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))
    actual shouldBe Failure(e)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a $e")
  }

  "bracket should call clean if calculation fails" in forAll { (a: Int, b: Int, e: Throwable) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x" :: calls
      Success(())
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Failure(e)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))
    actual shouldBe Failure(e)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a")
  }

  "bracket should return calculation error if if both calculation and clean fail" in forAll {
    (a: Int, b: Int, e1: Throwable, e2: Throwable) =>
      var calls = List.empty[String]

      def clean(x: Int): Try[Unit] = {
        calls = s"clean $x $e2" :: calls
        Failure(e2)
      }

      def add(x: Int)(y: Int): Try[Int] = {
        calls = s"add $x $y" :: calls
        Failure(e1)
      }

      val actual = bracket(Try(a))(clean).flatMap(add(b))
      actual shouldBe Failure(e1)
      calls.reverse shouldBe List(s"add $b $a", s"clean $a $e2")
  }
} 
Example 18
Source File: CodegenMain.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.codegen

import com.daml.codegen.{Main => ScalaCodegen}
import com.daml.lf.codegen.conf.CodegenConfigReader.{CodegenDest, Java, Scala}
import com.daml.lf.codegen.conf.{CodegenConfigReader, Conf}
import com.daml.lf.codegen.{CodeGenRunner => JavaCodegen}

import scala.util.{Failure, Success, Try}

object CodegenMain {

  sealed abstract class ExitCode(val code: Int)
  object OK extends ExitCode(0)
  object UsageError extends ExitCode(101)
  object CodegenError extends ExitCode(201)

  private final case class FrontEndConfig(mode: Option[CodegenDest])

  def main(args: Array[String]): Unit = {
    val exitCode: ExitCode = parseFrontEndConfig(args) match {
      case Some(FrontEndConfig(Some(Java))) =>
        javaCodegen(args.tail)
      case Some(FrontEndConfig(Some(Scala))) =>
        scalaCodegen(args.tail)
      case Some(FrontEndConfig(None)) | None =>
        println("\n")
        cliConfigParser.showUsage()
        UsageError
    }
    sys.exit(exitCode.code)
  }

  private def javaCodegen(args: Array[String]): ExitCode = {
    println("Java codegen")
    runCodegen(JavaCodegen.run, codegenConfig(args, Java))
  }

  private def scalaCodegen(args: Array[String]): ExitCode = {
    println("Scala codegen")
    runCodegen(ScalaCodegen.generateCode, codegenConfig(args, Scala))
  }

  private def runCodegen(generate: Conf => Unit, configO: Option[Conf]): ExitCode =
    configO match {
      case None =>
        println("\n")
        Conf.parser.showUsage
        UsageError
      case Some(conf) =>
        Try(generate(conf)) match {
          case Success(_) =>
            OK
          case Failure(t) =>
            println(s"Error generating code: ${t.getMessage}")
            CodegenError
        }
    }

  private def codegenConfig(args: Array[String], mode: CodegenDest): Option[Conf] =
    if (args.nonEmpty) {
      println(s"Reading configuration from command line input: ${args.mkString(",")}")
      Conf.parse(args)
    } else {
      println(s"Reading configuration from project configuration file")
      CodegenConfigReader.readFromEnv(mode) match {
        case Left(e) => println(s"Error reading project configuration file: $e"); None
        case Right(c) => Some(c)
      }
    }

  private def parseFrontEndConfig(args: Seq[String]): Option[FrontEndConfig] = args match {
    case h +: _ => cliConfigParser.parse(Seq(h), FrontEndConfig(None))
    case _ => None
  }

  private val cliConfigParser = new scopt.OptionParser[FrontEndConfig]("codegen-front-end") {
    head("Codegen front end")

    override def showUsageOnError = false

    help("help").text("Prints this usage text")
    note("\n")

    cmd("java")
      .action((_, c) => c.copy(mode = Some(Java)))
      .text("To generate Java code:\n")
      .children(help("help").text("Java codegen help"))
    note("\n")

    cmd("scala")
      .action((_, c) => c.copy(mode = Some(Scala)))
      .text("To generate Scala code:\n")
      .children(help("help").text("Scala codegen help"))
    note("\n")
  }
} 
Example 19
Source File: Positive.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import pureconfig.ConfigConvert

import scala.util.{Failure, Success, Try}

class Positive[T: Numeric] private (val value: T) {
  override def toString: String = value.toString
}

object Positive {
  def apply[T](num: T)(implicit numeric: Numeric[T]): Try[Positive[T]] = {
    if (numeric.lteq(num, numeric.zero)) {
      Failure(new IllegalArgumentException(s"$num must be positive."))
    } else {
      Success(new Positive(num))
    }
  }

  def unsafe[T](num: T)(implicit numeric: Numeric[T]): Positive[T] = Positive(num).get

  implicit val configConvertL: ConfigConvert[Positive[Long]] = convertPositive(_.toLong)

  implicit val configConvertI: ConfigConvert[Positive[Int]] = convertPositive(_.toInt)

  private def convertPositive[T: Numeric](readStr: String => T) = {

    ConfigConvert.viaStringTry[Positive[T]]({ s =>
      for {
        number <- Try(readStr(s))
        positive <- apply(number)
      } yield positive
    }, _.toString)
  }
} 
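Illustrative calls to the factory above (inputs are examples):

Positive(5)        // Success of a Positive wrapping 5
Positive(0)        // Failure(IllegalArgumentException: 0 must be positive.)
Positive.unsafe(7) // returns the Positive directly; throws on non-positive input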
Example 20
Source File: CommandStatusRow.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.data

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.model._

import scala.util.{Failure, Success, Try}
import scalaz.syntax.tag._

final case class CommandStatusRow(
    commandId: String,
    isCompleted: Boolean,
    subclassType: String,
    code: Option[String],
    details: Option[String],
    transactionId: Option[String]
) {

  def toCommandStatus(
      transactionById: ApiTypes.TransactionId => Try[Option[Transaction]]): Try[CommandStatus] = {
    subclassType match {
      case "CommandStatusWaiting" =>
        Success(CommandStatusWaiting())
      case "CommandStatusError" =>
        (for {
          c <- code
          d <- details
        } yield {
          CommandStatusError(c, d)
        }).fold[Try[CommandStatus]](
          Failure(
            DeserializationFailed(s"Failed to deserialize CommandStatusError from row: $this"))
        )(
          Success(_)
        )
      case "CommandStatusSuccess" =>
        transactionId.map { tId =>
          transactionById(ApiTypes.TransactionId(tId))
        } match {
          case Some(Success(Some(tx: Transaction))) => Success(CommandStatusSuccess(tx))
          case Some(Failure(e)) =>
            Failure(RecordNotFound(
              s"Failed to load transaction $transactionId for CommandStatus with commandId: $commandId. Exception: ${e.getMessage}"))
          case Some(Success(None)) =>
            Failure(RecordNotFound(
              s"Failed to load transaction $transactionId for CommandStatus with commandId: $commandId"))
          case None =>
            Failure(
              DeserializationFailed(
                s"TransactionId is missing for CommandStatusSuccess row: $this"))
        }
      case "CommandStatusUnknown" =>
        Success(CommandStatusUnknown())
      case s => Failure(DeserializationFailed(s"unknown subclass type for CommandStatus: $s"))
    }
  }
}

object CommandStatusRow {

  def fromCommandStatus(commandId: ApiTypes.CommandId, cs: CommandStatus): CommandStatusRow = {
    cs match {
      case w: CommandStatusWaiting =>
        CommandStatusRow(commandId.unwrap, w.isCompleted, "CommandStatusWaiting", None, None, None)
      case e: CommandStatusError =>
        CommandStatusRow(
          commandId.unwrap,
          e.isCompleted,
          "CommandStatusError",
          Some(e.code),
          Some(e.details),
          None)
      case s: CommandStatusSuccess =>
        CommandStatusRow(
          commandId.unwrap,
          s.isCompleted,
          "CommandStatusSuccess",
          None,
          None,
          Some(s.tx.id.unwrap))
      case u: CommandStatusUnknown =>
        CommandStatusRow(commandId.unwrap, u.isCompleted, "CommandStatusUnknown", None, None, None)
    }
  }
} 
Example 21
Source File: ContractRow.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.data

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.lf.value.json.ApiCodecCompressed
import ApiCodecCompressed.JsonImplicits._
import com.daml.navigator.json.ModelCodec.JsonImplicits._
import com.daml.navigator.model._

import scala.util.{Failure, Try}
import scalaz.syntax.tag._
import spray.json._

final case class ContractRow(
    id: String,
    templateId: String,
    archiveTransactionId: Option[String],
    argument: String,
    agreementText: Option[String],
    signatories: String,
    observers: String,
    key: Option[String]
) {

  def toContract(types: PackageRegistry): Try[Contract] = {
    (for {
      id <- Try(ApiTypes.ContractId(id))
      tid <- Try(parseOpaqueIdentifier(templateId).get)
      template <- Try(types.template(tid).get)
      recArgAny <- Try(
        ApiCodecCompressed.jsValueToApiValue(argument.parseJson, tid, types.damlLfDefDataType _))
      recArg <- Try(recArgAny.asInstanceOf[ApiRecord])
      sig <- Try(signatories.parseJson.convertTo[List[ApiTypes.Party]])
      obs <- Try(observers.parseJson.convertTo[List[ApiTypes.Party]])
      key <- Try(
        key.map(_.parseJson.convertTo[ApiValue](
          ApiCodecCompressed.apiValueJsonReader(template.key.get, types.damlLfDefDataType _))))
    } yield {
      Contract(id, template, recArg, agreementText, sig, obs, key)
    }).recoverWith {
      case e: Throwable =>
        Failure(DeserializationFailed(s"Failed to deserialize Contract from row: $this. Error: $e"))
    }
  }
}

object ContractRow {

  def fromContract(c: Contract): ContractRow = {
    ContractRow(
      c.id.unwrap,
      c.template.id.asOpaqueString,
      None,
      c.argument.toJson.compactPrint,
      c.agreementText,
      c.signatories.toJson.compactPrint,
      c.observers.toJson.compactPrint,
      c.key.map(_.toJson.compactPrint)
    )
  }
} 
Example 22
Source File: Read.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}


  // Note: excerpt; the enclosing object Read and its fromFunction and
  // fail helpers are omitted from this listing.
  def fromUnsafeFunction[To](f: String => To)(implicit classTag: ClassTag[To]): Read[To] =
    fromFunction[To] { str =>
      Try(f(str)) match {
        case Success(value) => Right(value)
        case Failure(_)     => Read.fail[To](classTag)
      }
    }

  implicit val readString = Read.fromFunction[String](str => Right(str))
  implicit val readBoolean = Read.fromUnsafeFunction[Boolean](_.toBoolean)
  implicit val readInt = Read.fromUnsafeFunction[Int](_.toInt)
  implicit val readFloat = Read.fromUnsafeFunction[Float](_.toFloat)
  implicit val readDouble = Read.fromUnsafeFunction[Double](_.toDouble)
} 
Example 23
Source File: ContextualizedLogger.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.logging

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.grpc.GrpcException
import io.grpc.Status
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.util.{Failure, Try}
import scala.util.control.NonFatal

object ContextualizedLogger {

  // Caches loggers to prevent them from needlessly wasting memory
  // Replicates the behavior of the underlying Slf4j logger factory
  private[this] val cache = TrieMap.empty[String, ContextualizedLogger]

  // Allows explicitly passing a logger; should be used for testing only
  private[logging] def createFor(withoutContext: Logger): ContextualizedLogger =
    new ContextualizedLogger(withoutContext)

  // Slf4j handles the caching of the underlying logger itself
  private[logging] def createFor(name: String): ContextualizedLogger =
    createFor(LoggerFactory.getLogger(name))

  
  def get(clazz: Class[_]): ContextualizedLogger = {
    val name = clazz.getName.stripSuffix("$")
    cache.getOrElseUpdate(name, createFor(name))
  }

}

final class ContextualizedLogger private (val withoutContext: Logger) {

  val trace = new LeveledLogger.Trace(withoutContext)
  val debug = new LeveledLogger.Debug(withoutContext)
  val info = new LeveledLogger.Info(withoutContext)
  val warn = new LeveledLogger.Warn(withoutContext)
  val error = new LeveledLogger.Error(withoutContext)

  private def internalOrUnknown(code: Status.Code): Boolean =
    code == Status.Code.INTERNAL || code == Status.Code.UNKNOWN

  private def logError(t: Throwable)(implicit logCtx: LoggingContext): Unit =
    error("Unhandled internal error", t)

  def logErrorsOnCall[Out](implicit logCtx: LoggingContext): PartialFunction[Try[Out], Unit] = {
    case Failure(e @ GrpcException(s, _)) =>
      if (internalOrUnknown(s.getCode)) {
        logError(e)
      }
    case Failure(NonFatal(e)) =>
      logError(e)
  }

  def logErrorsOnStream[Out](implicit logCtx: LoggingContext): Flow[Out, Out, NotUsed] =
    Flow[Out].mapError {
      case e @ GrpcException(s, _) =>
        if (internalOrUnknown(s.getCode)) {
          logError(e)
        }
        e
      case NonFatal(e) =>
        logError(e)
        e
    }

} 
Example 24
Source File: Port.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ports

import scala.util.{Failure, Success, Try}

final case class Port private (value: Int) extends AnyVal {
  override def toString: String = value.toString
}

object Port {
  private val ValidPorts: Range = 0 until 0x10000

  
  val Dynamic = new Port(0)

  def apply(value: Int): Port =
    create(value).get

  def validate(value: Int): Try[Unit] =
    create(value).map(_ => ())

  private def create(value: Int): Try[Port] =
    if (ValidPorts.contains(value))
      Success(new Port(value))
    else
      Failure(
        new IllegalArgumentException(
          s"Ports must be in the range ${ValidPorts.start}—${ValidPorts.last}."))
} 
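Illustrative calls (port numbers are examples): create is private, so callers go through apply, which throws on failure, or validate, which surfaces the Failure as a Try.

Port(8080)           // Port(8080)
Port.validate(8080)  // Success(())
Port.validate(70000) // Failure(IllegalArgumentException): 70000 is out of range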
Example 25
Source File: Spin.scala    From iotchain   with MIT License
package jbok.app.components

import com.thoughtworks.binding
import com.thoughtworks.binding.Binding
import org.scalajs.dom.Element

import scala.util.{Failure, Success, Try}

object Spin {
  @binding.dom
  def render(color: String = "blue"): Binding[Element] =
    <div class="spinner">
        <div class={s"rect1 $color"}></div>
        <div class={s"rect2 $color"}></div>
        <div class={s"rect3 $color"}></div>
        <div class={s"rect4 $color"}></div>
        <div class={s"rect5 $color"}></div>
      </div>

  @binding.dom
  def renderFuture[A](fb: Binding[Option[Try[A]]]): Binding[Element] = fb.bind match {
    case Some(Success(a)) => <div>{a.toString}</div>
    case Some(Failure(e)) => <div>{e.toString}</div>
    case None             => <div>{render().bind}</div>
  }
} 
Example 26
Source File: ChaosActorInterface.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorRef
import akka.io.Tcp
import akka.util.ByteString
import akka.pattern.ask
import akka.util.Timeout
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheckResult
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheck

import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success


object ChaosActorInterface {
  case class HealthCheck(requester: ActorRef)
  case class HealthCheckResult(state: Int, requester: ActorRef)
}

class ChaosActorInterface(chaosActor: ActorRef) extends ChaosInterface {
  implicit val timeout = Timeout(1.seconds)

  def handleCommand = {
    case ("persist", None, recv) =>
      val check = HealthCheck(recv)

      (chaosActor ? check).mapTo[HealthCheckResult] onComplete {
        case Success(result) =>
          result.requester ! Tcp.Write(ByteString(result.state.toString))
          result.requester ! Tcp.Close
        case Failure(e) =>
          recv ! Tcp.Close
      }
  }
} 
Example 27
Source File: TestSpec.scala    From nanotest-strawman   with Apache License 2.0
package verify

import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.control.NonFatal
import scala.util.{ Failure, Success }
import verify.sourcecode.SourceLocation

case class TestSpec[I, +O](name: String, f: I => Future[Result[O]]) extends (I => Future[Result[O]]) {

  override def apply(v1: I): Future[Result[O]] = f(v1)
}

object TestSpec {
  def async[Env](name: String, cb: Env => Future[Unit])(implicit ec: ExecutionContext): TestSpec[Env, Unit] =
    TestSpec(
      name, { env =>
        val f: Future[Unit] =
          try cb(env)
          catch { case NonFatal(ex) => Future.failed(ex) }

        val p = Promise[Result[Unit]]()
        f.onComplete {
          case Success(_) =>
            p.success(Result.Success(()))
          case Failure(ex) =>
            p.success(Result.from(ex))
        }
        p.future
      }
    )

  def sync[Env](name: String, cb: Env => Void): TestSpec[Env, Unit] =
    TestSpec(
      name, { env =>
        try {
          cb(env) match {
            case Void.UnitRef =>
              Future.successful(Result.Success(()))
            case Void.Caught(ref, loc) =>
              Future.successful(unexpected(ref, loc))
          }
        } catch {
          case NonFatal(ex) =>
            Future.successful(Result.from(ex))
        }
      }
    )

  private def unexpected[A](ref: A, loc: SourceLocation): Result[Nothing] =
    Result.Failure(
      s"Problem with test spec, expecting `Unit`, but received: $ref ",
      None,
      Some(loc)
    )
} 
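For context, here is a hypothetical way to build and run one of these specs (the environment value and assertion are illustrative, not from the project):

import scala.concurrent.ExecutionContext.Implicits.global

// Illustrative only: an async spec over an Int environment.
val spec: TestSpec[Int, Unit] =
  TestSpec.async[Int]("doubles are even", env => Future(assert((env * 2) % 2 == 0)))

// Applying the spec runs it: TestSpec extends I => Future[Result[O]].
val outcome: Future[Result[Unit]] = spec(21)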
Example 28
Source File: ArangoWriteAheadLog.scala    From scarango   with MIT License 5 votes vote down vote up
package com.outr.arango

import com.outr.arango.api.{APIWalTail, WALOperation, WALOperations}
import io.youi.client.HttpClient
import io.youi.util.Time
import reactify.Channel

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

class ArangoWriteAheadLog(client: HttpClient) {
  def tail(global: Boolean = false,
           from: Option[Long] = None,
           to: Option[Long] = None,
           lastScanned: Long = 0L,
           chunkSize: Option[Long] = None,
           syncerId: Option[Long] = None,
           serverId: Option[Long] = None,
           clientId: String = "scarango")
          (implicit ec: ExecutionContext): Future[WALOperations] = {
    APIWalTail.get(
      client = client,
      global = Some(global),
      from = from,
      to = to,
      lastScanned = lastScanned,
      chunkSize = chunkSize,
      syncerId = syncerId,
      serverId = serverId,
      clientId = Some(clientId)
    )
  }

  def monitor(global: Boolean = false,
              from: Option[Long] = None,
              to: Option[Long] = None,
              lastScanned: Long = 0L,
              chunkSize: Option[Long] = None,
              syncerId: Option[Long] = None,
              serverId: Option[Long] = None,
              clientId: String = "scarango",
              delay: FiniteDuration = 5.seconds,
              skipHistory: Boolean = true,
              failureHandler: Throwable => Option[FiniteDuration] = t => {
                scribe.error("Monitor error", t)
                None
              })
             (implicit ec: ExecutionContext): WriteAheadLogMonitor = {
    val m = new WriteAheadLogMonitor(delay, skipHistory, failureHandler)
    m.run(tail(global, from, to, lastScanned, chunkSize, syncerId, serverId, clientId))
    m
  }
}

class WriteAheadLogMonitor(delay: FiniteDuration, skipHistory: Boolean, failureHandler: Throwable => Option[FiniteDuration]) extends Channel[WALOperation] {
  private var keepAlive = false
  private var last: Option[WALOperations] = None
  private var from: Long = 0L
  private var skipped: Boolean = false

  val tailed: Channel[WALOperations] = Channel[WALOperations]

  private[arango] def run(future: Future[WALOperations])(implicit ec: ExecutionContext): Unit = {
    keepAlive = true

    future.onComplete { complete =>
      val d = complete match {
        case Success(operations) => try {
          if (skipHistory && !skipped) {
            if (operations.lastIncluded == 0L) {
              skipped = true
            }
          } else {
            operations.operations.foreach(static)
          }
          last = Some(operations)
          from = math.max(from, operations.lastIncluded)
          tailed @= operations
          Some(delay)
        } catch {
          case t: Throwable => failureHandler(t)
        }
        case Failure(exception) => failureHandler(exception)
      }
      d match {
        case Some(delay) if keepAlive => last.foreach { ops =>
          Time.delay(delay).foreach(_ => run(ops.tail(from)))
        }
        case _ => // Error or keepAlive caused monitor to stop
      }
    }
  }

  def stop(): Unit = keepAlive = false
} 
Example 29
Source File: GoldenCodecLaws.scala    From circe-golden   with Apache License 2.0 5 votes vote down vote up
package io.circe.testing.golden

import cats.instances.list._, cats.instances.try_._
import cats.syntax.traverse._
import cats.laws._
import io.circe.{ Json, Printer }
import io.circe.testing.CodecLaws
import scala.util.{ Failure, Success, Try }

trait GoldenCodecLaws[A] extends CodecLaws[A] {

  protected def goldenExamples: Try[List[(A, String)]]

  final def goldenDecoding: Try[List[IsEq[A]]] = goldenExamples.flatMap {
    _.traverse {
      case (value, encoded) =>
        io.circe.parser.decode[A](encoded)(decode) match {
          case Left(error)    => Failure(error)
          case Right(decoded) => Success(decoded <-> value)
        }
    }
  }

  final def goldenEncoding: Try[List[IsEq[String]]] = goldenExamples.map {
    _.map {
      case (value, encoded) =>
        printJson(encode(value)) <-> encoded
    }
  }
} 
Example 30
Source File: ResourceFileGoldenCodecLaws.scala    From circe-golden   with Apache License 2.0 5 votes vote down vote up
package io.circe.testing.golden

import cats.instances.list._, cats.instances.try_._
import cats.syntax.apply._, cats.syntax.traverse._
import io.circe.{ Decoder, Encoder, Printer }
import java.io.{ File, PrintWriter }
import org.scalacheck.{ Arbitrary, Gen }
import scala.reflect.runtime.universe.TypeTag
import scala.util.{ Failure, Try }
import scala.util.matching.Regex

abstract class ResourceFileGoldenCodecLaws[A](
  name: String,
  resourceRootDir: File,
  resourcePackage: List[String],
  val size: Int,
  count: Int,
  override protected val printer: Printer
) extends GoldenCodecLaws[A]
    with ExampleGeneration[A] {

  private[this] val resourceRootPath: String = "/" + resourcePackage.mkString("/") + "/"
  private[this] val resourceDir: File = resourcePackage.foldLeft(resourceRootDir) {
    case (acc, p) => new File(acc, p)
  }
  private[this] val GoldenFilePattern: Regex = "^-(.{44})\\.json$".r

  private[this] lazy val loadGoldenFiles: Try[List[(A, String)]] =
    Resources.open(resourceRootPath).flatMap { dirSource =>
      val files = dirSource.getLines.flatMap {
        case fileName if fileName.startsWith(name) =>
          fileName.drop(name.length) match {
            case GoldenFilePattern(seed) => Some((seed, fileName))
            case _                       => None
          }
        case _ => None
      }.toList.traverse[Try, (A, String)] {
        case (seed, name) =>
          val contents = Resources.open(resourceRootPath + name).map { source =>
            val lines = source.getLines.mkString("\n")
            source.close()
            lines
          }
          (getValueFromBase64Seed(seed), contents).tupled
      }

      dirSource.close()

      // Fail if we don't have either zero golden files or the required number.
      files.flatMap { values =>
        if (values.size == 0 || values.size == count) files
        else Failure(new IllegalStateException(s"Expected 0 or $count golden files, got ${values.size}"))
      }
    }

  private[this] def generateGoldenFiles: Try[List[(A, String)]] =
    generateRandomGoldenExamples(count).traverse {
      case (seed, value, encoded) =>
        Try {
          resourceDir.mkdirs()
          val file = new File(resourceDir, s"$name-${seed.toBase64}.json")

          val writer = new PrintWriter(file)
          writer.print(encoded)
          writer.close()

          (value, encoded)
        }
    }

  protected lazy val goldenExamples: Try[List[(A, String)]] =
    loadGoldenFiles.flatMap(fs => if (fs.isEmpty) generateGoldenFiles else loadGoldenFiles)
}

object ResourceFileGoldenCodecLaws {
  def apply[A](
    name: String,
    resourceRootDir: File,
    resourcePackage: List[String],
    size: Int,
    count: Int,
    printer: Printer
  )(implicit decodeA: Decoder[A], encodeA: Encoder[A], arbitraryA: Arbitrary[A]): GoldenCodecLaws[A] =
    new ResourceFileGoldenCodecLaws[A](name, resourceRootDir, resourcePackage, size, count, printer) {
      val decode: Decoder[A] = decodeA
      val encode: Encoder[A] = encodeA
      val gen: Gen[A] = arbitraryA.arbitrary
    }

  def apply[A](
    size: Int = 100,
    count: Int = 1,
    printer: Printer = Printer.spaces2
  )(
    implicit decodeA: Decoder[A],
    encodeA: Encoder[A],
    arbitraryA: Arbitrary[A],
    typeTagA: TypeTag[A]
  ): GoldenCodecLaws[A] =
    apply[A](Resources.inferName[A], Resources.inferRootDir, Resources.inferPackage[A], size, count, printer)
} 
Example 31
Source File: GoldenCodecTests.scala    From circe-golden   with Apache License 2.0 5 votes vote down vote up
package io.circe.testing.golden

import cats.instances.string._
import cats.kernel.Eq
import cats.laws.IsEq
import cats.laws.discipline.catsLawsIsEqToProp
import io.circe.{ Decoder, Encoder, Json, Printer }
import io.circe.testing.CodecTests
import org.scalacheck.{ Arbitrary, Prop, Shrink }
import scala.reflect.runtime.universe.TypeTag
import scala.util.{ Failure, Success, Try }

trait GoldenCodecTests[A] extends CodecTests[A] {
  def laws: GoldenCodecLaws[A]

  private[this] def tryListToProp[A: Eq](result: Try[List[IsEq[A]]]): Prop = result match {
    case Failure(error)      => Prop.exception(error)
    case Success(equalities) => Prop.all(equalities.map(catsLawsIsEqToProp(_)): _*)
  }

  def goldenCodec(
    implicit
    arbitraryA: Arbitrary[A],
    shrinkA: Shrink[A],
    eqA: Eq[A],
    arbitraryJson: Arbitrary[Json],
    shrinkJson: Shrink[Json]
  ): RuleSet = new DefaultRuleSet(
    name = "goldenCodec",
    parent = Some(codec),
    "decoding golden files" -> tryListToProp(laws.goldenDecoding),
    "encoding golden files" -> tryListToProp(laws.goldenEncoding)
  )

  def unserializableGoldenCodec(
    implicit
    arbitraryA: Arbitrary[A],
    shrinkA: Shrink[A],
    eqA: Eq[A],
    arbitraryJson: Arbitrary[Json],
    shrinkJson: Shrink[Json]
  ): RuleSet = new DefaultRuleSet(
    name = "goldenCodec",
    parent = Some(unserializableCodec),
    "decoding golden files" -> tryListToProp(laws.goldenDecoding),
    "encoding golden files" -> tryListToProp(laws.goldenEncoding)
  )
}

object GoldenCodecTests {
  def apply[A: Decoder: Encoder: Arbitrary: TypeTag]: GoldenCodecTests[A] =
    apply[A](ResourceFileGoldenCodecLaws[A]())

  def apply[A: Decoder: Encoder: Arbitrary: TypeTag](printer: Printer): GoldenCodecTests[A] =
    apply[A](ResourceFileGoldenCodecLaws[A](printer = printer))

  def apply[A: Decoder: Encoder: Arbitrary: TypeTag](count: Int): GoldenCodecTests[A] =
    apply[A](ResourceFileGoldenCodecLaws[A](count = count))

  def apply[A: Decoder: Encoder: Arbitrary: TypeTag](count: Int, printer: Printer): GoldenCodecTests[A] =
    apply[A](ResourceFileGoldenCodecLaws[A](count = count, printer = printer))

  def apply[A: Decoder: Encoder: Arbitrary](laws0: GoldenCodecLaws[A]): GoldenCodecTests[A] =
    new GoldenCodecTests[A] {
      val laws: GoldenCodecLaws[A] = laws0
    }
} 
Example 32
Source File: Job.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.scheduler

import scala.util.{Failure, Try}

import org.apache.spark.streaming.Time
import org.apache.spark.util.{CallSite, Utils}


// The class header and the private members referenced below were dropped from this
// listing; they are reconstructed here (remaining members of the original class omitted).
private[streaming] class Job(val time: Time, func: () => _) {
  private var _id: String = _
  private var _outputOpId: Int = _
  private var isSet = false
  private var _result: Try[_] = null
  private var _callSite: CallSite = null
  private var _startTime: Option[Long] = None
  private var _endTime: Option[Long] = None

  def run() {
    _result = Try(func())
  }

  def id: String = {
    if (!isSet) {
      throw new IllegalStateException("Cannot access id before calling setId")
    }
    _id
  }

  def outputOpId: Int = {
    if (!isSet) {
      throw new IllegalStateException("Cannot access number before calling setId")
    }
    _outputOpId
  }

  def setOutputOpId(outputOpId: Int) {
    if (isSet) {
      throw new IllegalStateException("Cannot call setOutputOpId more than once")
    }
    isSet = true
    _id = s"streaming job $time.$outputOpId"
    _outputOpId = outputOpId
  }

  def setCallSite(callSite: CallSite): Unit = {
    _callSite = callSite
  }

  def callSite: CallSite = _callSite

  def setStartTime(startTime: Long): Unit = {
    _startTime = Some(startTime)
  }

  def setEndTime(endTime: Long): Unit = {
    _endTime = Some(endTime)
  }

  def toOutputOperationInfo: OutputOperationInfo = {
    val failureReason = if (_result != null && _result.isFailure) {
      Some(Utils.exceptionString(_result.asInstanceOf[Failure[_]].exception))
    } else {
      None
    }
    OutputOperationInfo(
      time, outputOpId, callSite.shortForm, callSite.longForm, _startTime, _endTime, failureReason)
  }

  override def toString: String = id
} 
Example 33
Source File: JobWaiterSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import scala.util.Failure

import org.apache.spark.SparkFunSuite

class JobWaiterSuite extends SparkFunSuite {

  test("call jobFailed multiple times") {
    val waiter = new JobWaiter[Int](null, 0, totalTasks = 2, null)

    // Should not throw exception if calling jobFailed multiple times
    waiter.jobFailed(new RuntimeException("Oops 1"))
    waiter.jobFailed(new RuntimeException("Oops 2"))
    waiter.jobFailed(new RuntimeException("Oops 3"))

    waiter.completionFuture.value match {
      case Some(Failure(e)) =>
        // We should receive the first exception
        assert("Oops 1" === e.getMessage)
      case other => fail("Should receive the first exception but it was " + other)
    }
  }
} 
Example 34
Source File: IterativeDeepeningSearch.scala    From aima-scala   with MIT License 5 votes vote down vote up
package aima.core.search.uninformed

import aima.core.search.Problem

import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}


trait IterativeDeepeningSearch[State, Action] {
  def depthLimitedTreeSearch: DepthLimitedTreeSearch[State, Action]

  def search(problem: Problem[State, Action], noAction: Action): Try[DLSResult[Action]] = {
    @tailrec def searchHelper(currentDepth: Int): Try[DLSResult[Action]] = {
      val result = depthLimitedTreeSearch.search(problem, currentDepth, noAction)

      result match {
        case Success(Solution(_)) | Failure(_) => result
        case _ if currentDepth == Int.MaxValue =>
          Failure[DLSResult[Action]](new Exception("Depth has reached Int.MaxValue"))
        case _ => searchHelper(currentDepth + 1)
      }
    }

    searchHelper(currentDepth = 0)
  }
} 
Example 35
Source File: DepthLimitedTreeSearch.scala    From aima-scala   with MIT License 5 votes vote down vote up
package aima.core.search.uninformed

import aima.core.search.{Problem, ProblemSearch, StateNode}

import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}

sealed trait DLSResult[Action] {
  def actions: List[Action]
}

final case class Solution[Action](actions: List[Action]) extends DLSResult[Action]
final case class CutOff[Action](actions: List[Action])   extends DLSResult[Action]


trait DepthLimitedTreeSearch[State, Action] extends ProblemSearch[State, Action, StateNode[State, Action]] {

  type Node = StateNode[State, Action]

  def search(problem: Problem[State, Action], initialLimit: Int, noAction: Action): Try[DLSResult[Action]] =
    Try {

      def recursiveDLS(node: Node, currentLimit: Int): Try[DLSResult[Action]] =
        Try {
          if (problem.isGoalState(node.state)) {
            Success(Solution(solution(node)))
          } else if (currentLimit == 0) {
            Success(CutOff(solution(node)))
          } else {
            val childNodes = for {
              action <- problem.actions(node.state)
            } yield newChildNode(problem, node, action)

            @tailrec def shortCircuitChildSearch(children: List[Node]): Try[DLSResult[Action]] = {
              children match {
                case Nil => Failure[DLSResult[Action]](new Exception("Exhausted child nodes"))
                case lastChild :: Nil =>
                  recursiveDLS(lastChild, currentLimit - 1)
                case firstChild :: rest =>
                  recursiveDLS(firstChild, currentLimit - 1) match {
                    case result @ Success(Solution(_)) => result
                    case _                             => shortCircuitChildSearch(rest)
                  }
              }
            }

            shortCircuitChildSearch(childNodes)
          }
        }.flatten

      recursiveDLS(makeNode(problem.initialState, noAction), initialLimit)
    }.flatten

  def makeNode(state: State, noAction: Action): Node = StateNode(state, noAction, None)

  def newChildNode(problem: Problem[State, Action], parent: Node, action: Action): Node = {
    val childState = problem.result(parent.state, action)
    StateNode(childState, action, Some(parent))
  }
} 
Example 36
Source File: TestBase.scala    From scalajs-rxjs   with MIT License 5 votes vote down vote up
//     Project: scalajs-rxjs
// Description: Common base class for utest-based tests

// Copyright (c) 2016. Distributed under the MIT License (see included LICENSE file).
package rxjs

import rxjs.TestBase.ObservableFuture
import utest._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
import scala.concurrent._
import scala.scalajs.js
import scala.util.{Failure, Try}

abstract class TestBase extends TestSuite {
  implicit val ec = scalajs.concurrent.JSExecutionContext.queue

//  def future[T](o: Observable[T]): ObservableFuture[T] = new ObservableFuture[T](o)
}

object TestBase {
  class ObservableFuture[T](obs: Observable[T]) extends Future[Seq[T]] {
    private var _data  = js.Array[T]()
    private val p = Promise[Seq[T]]()
    private lazy val future = p.future
    obs.subscribe((e:T)=> this.synchronized(_data.push(e)),
      (err:Any) => p.failure(new RuntimeException(err.toString)),
      () => p.success(_data) )

    override def onComplete[U](f: (Try[Seq[T]]) => U)(implicit executor: ExecutionContext): Unit = future.onComplete(f)
    override def isCompleted: Boolean = future.isCompleted
    override def value: Option[Try[Seq[T]]] = future.value
    def expectFailure(f: (Throwable)=>Any)(implicit ec: ExecutionContext): Future[Seq[T]] = {
      future.onSuccess{ case _ => throw new RuntimeException("expected Failure")}
      future.recover{
        case x =>
          f(x)
          Seq()
      }
    }

    @throws[Exception](classOf[Exception])
    override def result(atMost: Duration)(implicit permit: CanAwait): Seq[T] = future.result(atMost)

    @throws[InterruptedException](classOf[InterruptedException])
    @throws[TimeoutException](classOf[TimeoutException])
    override def ready(atMost: Duration)(implicit permit: CanAwait): ObservableFuture.this.type = ???

    // additional members since 2.12.0
    def transform[S](f: scala.util.Try[Seq[T]] => scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = ???
    def transformWith[S](f: scala.util.Try[Seq[T]] => scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = ???
  }
} 
Example 37
Source File: Node.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.network.discovery

import java.net.{InetSocketAddress, _}

import akka.util.ByteString
import io.iohk.ethereum.network
import io.iohk.ethereum.utils.Logger
import org.spongycastle.util.encoders.Hex

import scala.util.{Failure, Success, Try}

case class Node(id: ByteString, addr: InetSocketAddress) {
  def toUri: URI = {
    val host = network.getHostName(addr.getAddress)
    val port = addr.getPort
    new URI(s"enode://${Hex.toHexString(id.toArray[Byte])}@$host:$port")
  }
}

object Node {
  def fromUri(uri: URI): Node = {
    val nodeId = ByteString(Hex.decode(uri.getUserInfo))
    Node(nodeId, new InetSocketAddress(uri.getHost, uri.getPort))
  }
}

object NodeParser extends Logger {
  val NodeScheme = "enode"
  val NodeIdSize = 64

  // parseNode was elided from this listing; a minimal sketch of the validation it
  // performs (scheme, node-id decoding and size, host/port).
  def parseNode(node: String): Either[Set[Throwable], Node] =
    Try(new URI(node)) match {
      case Success(uri) if uri.getScheme != NodeScheme =>
        Left(Set(new Exception(s"Invalid node scheme '${uri.getScheme}', expected '$NodeScheme'")))
      case Success(uri) =>
        Try(ByteString(Hex.decode(uri.getUserInfo))) match {
          case Success(nodeId) if nodeId.length == NodeIdSize =>
            Right(Node(nodeId, new InetSocketAddress(uri.getHost, uri.getPort)))
          case Success(nodeId) =>
            Left(Set(new Exception(s"Invalid node id length ${nodeId.length}, expected $NodeIdSize bytes")))
          case Failure(ex) => Left(Set(ex))
        }
      case Failure(ex) => Left(Set(ex))
    }

  def parseNodes(unParsedNodes: Set[String]): Set[Node] = unParsedNodes.foldLeft[Set[Node]](Set.empty) {
    case (parsedNodes, nodeString) =>
      val maybeNode = NodeParser.parseNode(nodeString)
      maybeNode match {
        case Right(node) => parsedNodes + node
        case Left(errors) =>
          log.warn(s"Unable to parse node: $nodeString due to: ${errors.map(_.getMessage).mkString("; ")}")
          parsedNodes
      }
  }
} 
Example 38
Source File: Mantis.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum

import io.iohk.ethereum.blockchain.sync.SyncController
import io.iohk.ethereum.mining.Miner
import io.iohk.ethereum.network.discovery.DiscoveryListener
import io.iohk.ethereum.network.{PeerManagerActor, ServerActor}
import io.iohk.ethereum.nodebuilder.Node
import io.iohk.ethereum.utils.Logger

import scala.concurrent.Await
import scala.util.{Failure, Success, Try}

object Mantis {

  def main(args: Array[String]): Unit = {

    new Node with Logger {

      def tryAndLogFailure(f: () => Any): Unit = Try(f()) match {
        case Failure(e) => log.warn("Error while shutting down...", e)
        case Success(_) =>
      }

      override def shutdown(): Unit = {
        tryAndLogFailure(() => Await.ready(actorSystem.terminate, shutdownTimeoutDuration))
        tryAndLogFailure(() => storagesInstance.dataSources.closeAll())
      }

      genesisDataLoader.loadGenesisData()

      peerManager ! PeerManagerActor.StartConnecting
      server ! ServerActor.StartServer(networkConfig.Server.listenAddress)

      if (discoveryConfig.discoveryEnabled) {
        discoveryListener ! DiscoveryListener.Start
      }

      syncController ! SyncController.Start

      if (miningConfig.miningEnabled) {
        miner ! Miner.StartMining
      }

      peerDiscoveryManager // unlazy

      maybeJsonRpcServer match {
        case Right(jsonRpcServer) if jsonRpcServerConfig.enabled => jsonRpcServer.run()
        case Left(error) if jsonRpcServerConfig.enabled => log.error(error)
        case _ => // Nothing
      }
    }

  }
} 
Example 39
Source File: JsonRpcHttpsServer.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.jsonrpc.server

import java.io.{File, FileInputStream}
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.HttpOriginRange
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.ActorMaterializer
import io.iohk.ethereum.jsonrpc.JsonRpcController
import io.iohk.ethereum.jsonrpc.server.JsonRpcHttpsServer.HttpsSetupResult
import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig
import io.iohk.ethereum.utils.Logger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.Source
import scala.util.{Failure, Success, Try}

class JsonRpcHttpsServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig,
                         secureRandom: SecureRandom)(implicit val actorSystem: ActorSystem)
  extends JsonRpcServer with Logger {

  def run(): Unit = {
    implicit val materializer = ActorMaterializer()

    val maybeSslContext = validateCertificateFiles(config.certificateKeyStorePath, config.certificateKeyStoreType, config.certificatePasswordFile).flatMap{
      case (keystorePath, keystoreType, passwordFile) =>
        val passwordReader = Source.fromFile(passwordFile)
        try {
          val password = passwordReader.getLines().mkString
          obtainSSLContext(keystorePath, keystoreType, password)
        } finally {
          passwordReader.close()
        }
    }

    val maybeHttpsContext = maybeSslContext.map(sslContext => ConnectionContext.https(sslContext))

    maybeHttpsContext match {
      case Right(httpsContext) =>
        Http().setDefaultServerHttpContext(httpsContext)
        val bindingResultF = Http().bindAndHandle(route, config.interface, config.port, connectionContext = httpsContext)

        bindingResultF onComplete {
          case Success(serverBinding) => log.info(s"JSON RPC HTTPS server listening on ${serverBinding.localAddress}")
          case Failure(ex) => log.error("Cannot start JSON HTTPS RPC server", ex)
        }
      case Left(error) => log.error(s"Cannot start JSON HTTPS RPC server due to: $error")
    }
  }

  // obtainSSLContext was elided from this listing; a sketch of the keystore-based
  // SSLContext setup it performs (exact error messages are illustrative).
  private def obtainSSLContext(keystorePath: String, keystoreType: String, password: String): HttpsSetupResult[SSLContext] =
    Try {
      val keyStore = KeyStore.getInstance(keystoreType)
      keyStore.load(new FileInputStream(keystorePath), password.toCharArray)

      val keyManagerFactory = KeyManagerFactory.getInstance("SunX509")
      keyManagerFactory.init(keyStore, password.toCharArray)

      val trustManagerFactory = TrustManagerFactory.getInstance("SunX509")
      trustManagerFactory.init(keyStore)

      val sslContext = SSLContext.getInstance("TLS")
      sslContext.init(keyManagerFactory.getKeyManagers, trustManagerFactory.getTrustManagers, secureRandom)
      sslContext
    } match {
      case Success(sslContext) => Right(sslContext)
      case Failure(ex)         => Left(s"Cannot obtain the SSL context: ${ex.getMessage}")
    }

  private def validateCertificateFiles(maybeKeystorePath: Option[String],
                                       maybeKeystoreType: Option[String],
                                       maybePasswordFile: Option[String]): HttpsSetupResult[(String, String, String)] =
    (maybeKeystorePath, maybeKeystoreType, maybePasswordFile) match {
      case (Some(keystorePath), Some(keystoreType), Some(passwordFile)) =>
        val keystoreDirMissing = !new File(keystorePath).isFile
        val passwordFileMissing = !new File(passwordFile).isFile
        if(keystoreDirMissing && passwordFileMissing)
          Left("Certificate keystore path and password file configured but files are missing")
        else if(keystoreDirMissing)
          Left("Certificate keystore path configured but file is missing")
        else if(passwordFileMissing)
          Left("Certificate password file configured but file is missing")
        else
          Right((keystorePath, keystoreType, passwordFile))
      case _ =>
        Left("HTTPS requires: certificate-keystore-path, certificate-keystore-type and certificate-password-file to be configured")
    }

  override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins
}

object JsonRpcHttpsServer {
  type HttpsSetupResult[T] = Either[String, T]
} 
Example 40
Source File: JsonRpcHttpServer.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.jsonrpc.server

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.HttpOriginRange
import akka.stream.ActorMaterializer
import io.iohk.ethereum.jsonrpc._
import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig
import io.iohk.ethereum.utils.Logger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class JsonRpcHttpServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig)
                       (implicit val actorSystem: ActorSystem)
  extends JsonRpcServer with Logger {

  def run(): Unit = {
    implicit val materializer = ActorMaterializer()

    val bindingResultF = Http(actorSystem).bindAndHandle(route, config.interface, config.port)

    bindingResultF onComplete {
      case Success(serverBinding) => log.info(s"JSON RPC HTTP server listening on ${serverBinding.localAddress}")
      case Failure(ex) => log.error("Cannot start JSON HTTP RPC server", ex)
    }
  }

  override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins
} 
Example 41
Source File: FastSyncStateStorageActor.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging}
import akka.pattern.pipe
import io.iohk.ethereum.blockchain.sync.FastSync.SyncState
import io.iohk.ethereum.blockchain.sync.FastSyncStateStorageActor.GetStorage
import io.iohk.ethereum.db.storage.FastSyncStateStorage

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

class FastSyncStateStorageActor extends Actor with ActorLogging {

  def receive: Receive = {
    // after initialization send a valid Storage reference
    case storage: FastSyncStateStorage => context become idle(storage)
  }

  def idle(storage: FastSyncStateStorage): Receive = {
    // begin saving of the state to the storage and become busy
    case state: SyncState => persistState(storage, state)

    case GetStorage => sender() ! storage.getSyncState()
  }

  def busy(storage: FastSyncStateStorage, stateToPersist: Option[SyncState]): Receive = {
    // update state waiting to be persisted later. we only keep newest state
    case state: SyncState => context become busy(storage, Some(state))
    // exception was thrown during persisting of a state. push
    case Failure(e) => throw e
    // state was saved in the storage. become idle
    case Success(s: FastSyncStateStorage) if stateToPersist.isEmpty => context become idle(s)
    // state was saved in the storage but new state is already waiting to be saved.
    case Success(s: FastSyncStateStorage) if stateToPersist.isDefined => stateToPersist.foreach(persistState(s, _))

    case GetStorage => sender() ! storage.getSyncState()
  }

  private def persistState(storage: FastSyncStateStorage, syncState: SyncState): Unit = {
    import context.dispatcher
    val persistingQueues: Future[Try[FastSyncStateStorage]] = Future {
      lazy val result = Try { storage.putSyncState(syncState) }
      if (log.isDebugEnabled) {
        val now = System.currentTimeMillis()
        result // force the lazy write here so the elapsed time below measures it
        val end = System.currentTimeMillis()
        log.debug(s"Saving snapshot of a fast sync took ${end - now} ms")
        result
      } else {
        result
      }
    }
    persistingQueues pipeTo self
    context become busy(storage, None)
  }

}

object FastSyncStateStorageActor {
  case object GetStorage
} 
Example 42
Source File: Main.scala    From scala-json-rpc   with MIT License 5 votes vote down vote up
package io.github.shogowada.scala.jsonrpc.example.e2e.websocket

import java.io.IOException

import io.github.shogowada.scala.jsonrpc.JSONRPCServerAndClient
import io.github.shogowada.scala.jsonrpc.Types.JSONSender
import io.github.shogowada.scala.jsonrpc.client.JSONRPCClient
import io.github.shogowada.scala.jsonrpc.serializers.UpickleJSONSerializer
import io.github.shogowada.scala.jsonrpc.server.JSONRPCServer
import io.github.shogowada.scalajs.reactjs.ReactDOM
import io.github.shogowada.scalajs.reactjs.VirtualDOM._
import org.scalajs.dom
import org.scalajs.dom.WebSocket

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, Promise}
import scala.scalajs.js.JSApp
import scala.util.{Failure, Try}

object Main extends JSApp {
  override def main(): Unit = {
    val futureWebSocket = createFutureWebSocket()
    val serverAndClient = createServerAndClient(futureWebSocket)

    val mountNode = dom.document.getElementById("mount-node")
    ReactDOM.render(
      <((new TodoListView(serverAndClient.createAPI[TodoRepositoryAPI])) ()).empty,
      mountNode
    )
  }

  private def createFutureWebSocket(): Future[WebSocket] = {
    val promisedWebSocket: Promise[WebSocket] = Promise()
    val webSocket = new dom.WebSocket(webSocketUrl)

    webSocket.onopen = (_: dom.Event) => {
      promisedWebSocket.success(webSocket)
    }

    webSocket.onerror = (event: dom.Event) => {
      promisedWebSocket.failure(new IOException(event.toString))
    }

    promisedWebSocket.future
  }

  private def webSocketUrl: String = {
    val location = dom.window.location
    val protocol = location.protocol match {
      case "http:" => "ws:"
      case "https:" => "wss:"
    }
    s"$protocol//${location.host}/jsonrpc"
  }

  private def createServerAndClient(futureWebSocket: Future[WebSocket]): JSONRPCServerAndClient[UpickleJSONSerializer] = {
    val jsonSerializer = UpickleJSONSerializer()

    val server = JSONRPCServer(jsonSerializer)

    val jsonSender: JSONSender = (json: String) => {
      futureWebSocket
          .map(webSocket => Try(webSocket.send(json)))
          .flatMap(tried => tried.fold(
            throwable => Future.failed(throwable),
            _ => Future(None)
          ))
    }
    val client = JSONRPCClient(jsonSerializer, jsonSender)

    val serverAndClient = JSONRPCServerAndClient(server, client)

    futureWebSocket.foreach(webSocket => {
      webSocket.onmessage = (event: dom.MessageEvent) => {
        val message = event.data.toString
        serverAndClient.receiveAndSend(message).onComplete {
          case Failure(throwable) => {
            println("Failed to send response", throwable)
          }
          case _ =>
        }
      }
    })

    serverAndClient
  }
} 
Example 43
Source File: ResourceManagement.scala    From scalismo-faces   with Apache License 2.0 5 votes vote down vote up
package scalismo.faces.utils

import java.io.Closeable

import scala.io.Source
import scala.util.control.NonFatal
import scala.util.{Failure, Try}

// The enclosing object declaration was dropped from this listing; restored so the block compiles.
object ResourceManagement {
  def usingOption[T <: Closeable, R](obj: => Option[T], after: T => Unit = { t: T => t.close() })(block: T => Option[R]): Option[R] = {
    val o: Option[T] = try {
      obj
    } catch {
      case NonFatal(e) => None
    }
    o.flatMap { res =>
      try {
        block(res)
      } finally {
        after(res)
      }
    }
  }

} 
Example 44
Source File: SeedDetector.scala    From random-data-generator   with Apache License 2.0 5 votes vote down vote up
package com.danielasfregola.randomdatagenerator.utils

import org.scalacheck.rng.Seed

import scala.util.{Failure, Success, Try}

private[randomdatagenerator] object SeedDetector extends SeedDetector

private[randomdatagenerator] trait SeedDetector {

  protected lazy val logger = new PrettyPrinter()

  lazy val seed: Seed = createSeedObj(seedValue)

  private def createSeedObj(seedValue: Long): Seed = {
    logger.info(s"Generating random data using seed $seedValue")
    logger.info(s"Replicate this session by setting ${SeedVariable.name}=$seedValue")
    Seed(seedValue)
  }

  private lazy val seedValue: Long = optLongVariable match {
    case Some(preSelectedSeed) =>
      logger.info(s"Variable ${SeedVariable.name} detected: setting $preSelectedSeed as seed")
      preSelectedSeed
    case None =>
      logger.info(s"No variable ${SeedVariable.name} detected: setting random seed")
      randomLong
  }

  private lazy val optLongVariable: Option[Long] = envVariable.map { value =>
    Try(value.toLong) match {
      case Success(l) => l
      case Failure(ex) => throw new RuntimeException(s"Please, provide a numeric value for ${SeedVariable.name}", ex)
    }
  }

  protected lazy val envVariable: Option[String] = SeedVariable.value
  protected def randomLong = scala.util.Random.nextLong
} 
Example 45
Source File: Tests.scala    From lolhttp   with Apache License 2.0 5 votes vote down vote up
package lol.http

import cats.effect.IO

import org.scalatest._

import scala.util.{Try, Success, Failure}

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._

abstract class Tests extends FunSuite with Matchers with OptionValues with Inside with Inspectors {
  val Pure = Tag("Pure")
  val Slow = Tag("Slow")
  def await[A](atMost: FiniteDuration = 30.seconds)(a: IO[A]): A = Await.result(a.unsafeToFuture, atMost)
  def withServer(server: Server)(test: Server => Unit) = try { test(server) } finally { server.stop() }
  def status(req: Request, atMost: FiniteDuration = 30.seconds, followRedirects: Boolean = true, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Int = {
    await(atMost) { Client.run(req, followRedirects = followRedirects, timeout = atMost, protocol = protocol)(res => IO.pure(res.status)) }
  }
  def contentString(req: Request, atMost: FiniteDuration = 30.seconds, followRedirects: Boolean = true, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): String = {
    await(atMost) { Client.run(req, followRedirects = followRedirects, timeout = atMost, protocol = protocol)(_.readAs[String]) }
  }
  def headers(req: Request, atMost: FiniteDuration = 30.seconds, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Map[HttpString,HttpString] = {
    await(atMost) { Client.run(req, timeout = atMost, protocol = protocol)(res => IO.pure(res.headers)) }
  }
  def header(req: Request, header: HttpString, atMost: FiniteDuration = 30.seconds, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Option[HttpString] = {
    await(atMost) { Client.run(req, timeout = atMost, protocol = protocol)(res => IO.pure(res.headers.get(header))) }
  }
  def getString(content: Content, codec: String = "utf-8") = new String(getBytes(content).toArray, codec)
  def getBytes(content: Content): Vector[Byte] = content.stream.compile.toVector.unsafeRunSync()
  def bytes(data: Int*): Seq[Byte] = data.map(_.toByte)
  def eventually[A](assertion: => A, timeout: FiniteDuration = 5.seconds): A = {
    val start = System.currentTimeMillis
    def go(): A = Try(assertion) match {
      case Success(a) => a
      case Failure(e) =>
        if(System.currentTimeMillis - start < timeout.toMillis) go() else throw e
    }
    go()
  }
} 
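A hypothetical call site for the eventually helper above; note that it spins without sleeping between attempts, so it is best suited to cheap assertions:

// Illustrative only: retry an assertion for up to 10 seconds (connectionCount is a made-up value).
eventually(assert(connectionCount == 0), timeout = 10.seconds)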
Example 46
Source File: Server.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.examples.akka.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.typesafe.scalalogging.LazyLogging
import io.opencensus.scala.akka.http.TracingDirective._
import io.opencensus.trace.AttributeValue
import org.slf4j.bridge.SLF4JBridgeHandler

import scala.util.{Failure, Success}

object Server extends App with LazyLogging {
  // Forward java.util.Logging to slf4j
  SLF4JBridgeHandler.removeHandlersForRootLogger()
  SLF4JBridgeHandler.install()

  implicit val system: ActorSystem = ActorSystem()
  import system.dispatcher

  val routes: Route = traceRequest { span =>
    complete {
      val attrValue = AttributeValue.stringAttributeValue("test")
      span.putAttribute("my-attribute", attrValue)
      "Hello opencensus"
    }
  }

  logger.info("Binding...")
  Http().bindAndHandle(routes, "0.0.0.0", 8080).onComplete {
    case Success(bound) =>
      logger.info(s"Bound to ${bound.localAddress}")
    case Failure(e) =>
      logger.error("Failed to bind", e)
  }
} 
Example 47
Source File: Client.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.examples.akka.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.scaladsl.{Sink, Source}
import io.opencensus.scala.akka.http.TracingClient
import org.slf4j.bridge.SLF4JBridgeHandler

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}

object Client extends App {
  // Forward java.util.Logging to slf4j
  SLF4JBridgeHandler.removeHandlersForRootLogger()
  SLF4JBridgeHandler.install()

  implicit val system: ActorSystem = ActorSystem()
  import system.dispatcher

  def await[T](f: Future[T]) = Await.result(f, 3.seconds)

  // Request level client
  val pipelining = Http().singleRequest(_: HttpRequest)
  val r1 = await {
    TracingClient
      .traceRequest(pipelining)(HttpRequest(uri = "http://localhost:8080"))
      .flatMap(_.entity.toStrict(1.second))
      .map(_.data.utf8String)
  }
  println(r1)

  // Host level client
  val pool     = Http().cachedHostConnectionPool[Unit]("localhost", 8080)
  val hostFlow = TracingClient.traceRequestForPool(pool)

  val r2 = await {
    Source
      .single(HttpRequest(uri = "/"))
      .map((_, ()))
      .via(hostFlow)
      .map(_._1)
      .flatMapConcat {
        case Success(response) => response.entity.dataBytes
        case Failure(e)        => throw e
      }
      .map(_.utf8String)
      .runWith(Sink.head)
  }
  println(r2)

  // Connection level client
  val connection     = Http().outgoingConnection("localhost", 8080)
  val connectionFlow = TracingClient.traceRequestForConnection(connection)

  val r3 = await {
    Source
      .single(HttpRequest(uri = "/"))
      .via(connectionFlow)
      .flatMapConcat(_.entity.dataBytes)
      .map(_.utf8String)
      .runWith(Sink.head)
  }
  println(r3)
} 
Example 48
Source File: B3FormatPropagationSpec.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.http.testSuite.propagation

import io.opencensus.scala.http.propagation.B3FormatPropagation
import io.opencensus.trace.BlankSpan
import org.scalatest.TryValues

import scala.util.Failure
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class B3FormatPropagationSpec
    extends AnyFlatSpec
    with Matchers
    with TryValues
    with B3FormatPropagation[(String, String), Map[String, String]] {

  val fakeTraceId = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
  val fakeSpanId  = "bbbbbbbbbbbbbbbb"

  "headersWithTracingContext" should "return the correct B3 headers from a spans context" in {
    headersWithTracingContext(BlankSpan.INSTANCE) should contain theSameElementsAs List(
      "X-B3-TraceId" -> "00000000000000000000000000000000",
      "X-B3-SpanId"  -> "0000000000000000"
    )
  }

  behavior of "extractContext"
  it should "return a span context with the values from the B3 http headers" in {
    val request = Map(
      "X-B3-TraceId" -> fakeTraceId,
      "X-B3-SpanId"  -> fakeSpanId,
      "X-B3-Sampled" -> "1"
    )

    val context = extractContext(request).success.value
    context.getTraceId.toLowerBase16 shouldBe fakeTraceId
    context.getSpanId.toLowerBase16 shouldBe fakeSpanId
    context.getTraceOptions.isSampled shouldBe true
  }

  it should "return a failure when the headers are missing" in {
    extractContext(Map.empty) shouldBe a[Failure[_]]
  }

  override def headerValue(
      req: Map[String, String],
      key: String
  ): Option[String] = req.get(key)

  override def createHeader(key: String, value: String): (String, String) =
    (key, value)
} 
Example 49
Source File: MockPropagation.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.http.testSuite

import io.opencensus.scala.http.propagation.Propagation
import io.opencensus.trace._

import scala.collection.immutable
import scala.util.{Failure, Success, Try}

trait MockPropagation[Header, Request] extends Propagation[Header, Request] {

  def rawHeader(key: String, value: String): Header
  def path(request: Request): String

  val requestPathWithoutParent = "/no/parent/context"
  val fakeTraceId              = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
  val fakeSpanId               = "bbbbbbbbbbbbbbbb"
  val sampledSpanContext = SpanContext.create(
    TraceId.fromLowerBase16(fakeTraceId),
    SpanId.fromLowerBase16(fakeSpanId),
    TraceOptions.builder().setIsSampled(true).build(),
    Tracestate.builder.build()
  )

  override def headersWithTracingContext(span: Span): immutable.Seq[Header] =
    List(rawHeader("X-Mock-Trace", "12345"))

  override def extractContext(request: Request): Try[SpanContext] =
    if (path(request) == requestPathWithoutParent)
      Failure(new Exception("test error"))
    else
      Success(sampledSpanContext)

} 
Example 50
Source File: NativeFunctionRegistration.scala    From spark-alchemy   with Apache License 2.0 5 votes vote down vote up
package com.swoop.alchemy.spark.expressions

import org.apache.spark.sql.EncapsulationViolator.createAnalysisException
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionDescription, ExpressionInfo, RuntimeReplaceable}

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

// based on Spark's FunctionRegistry @ossSpark
trait NativeFunctionRegistration extends FunctionRegistration {

  type FunctionBuilder = Seq[Expression] => Expression

  def expressions: Map[String, (ExpressionInfo, FunctionBuilder)]


  def registerFunctions(fr: FunctionRegistry): Unit = {
    expressions.foreach { case (name, (info, builder)) => fr.registerFunction(FunctionIdentifier(name), info, builder) }
  }

  def registerFunctions(spark: SparkSession): Unit = {
    registerFunctions(spark.sessionState.functionRegistry)
  }

  protected def expressionInfo[T <: Expression : ClassTag](name: String): ExpressionInfo = {
    val clazz = scala.reflect.classTag[T].runtimeClass
    val df = clazz.getAnnotation(classOf[ExpressionDescription])
    if (df != null) {
      new ExpressionInfo(clazz.getCanonicalName, null, name, df.usage(), df.extended())
    } else {
      new ExpressionInfo(clazz.getCanonicalName, name)
    }
  }

} 
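A minimal sketch of a concrete registration built on the trait above, reusing one of Spark's own catalyst expressions (the object and function names are illustrative):

import org.apache.spark.sql.catalyst.expressions.Upper

// Illustrative only: expose Upper under a custom SQL name.
object MyFunctions extends NativeFunctionRegistration {
  val expressions: Map[String, (ExpressionInfo, FunctionBuilder)] = Map(
    "my_upper" -> (expressionInfo[Upper]("my_upper"), (children: Seq[Expression]) => Upper(children.head))
  )
}

// MyFunctions.registerFunctions(spark) then makes my_upper callable from SQL.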
Example 51
Source File: RefinedTypeOpsSpec.scala    From refined   with MIT License 5 votes vote down vote up
package eu.timepit.refined.api

import eu.timepit.refined.types.numeric.NonNegInt
import org.scalacheck.Prop._
import org.scalacheck.Properties
import scala.util.{Failure, Success, Try}

class RefinedTypeOpsSpec extends Properties("RefinedTypeOps") {

  property("from ~= unapply") = forAll { i: Int =>
    NonNegInt.from(i).right.toOption ?= NonNegInt.unapply(i)
  }

  property("from ~= unsafeFrom") = forAll { i: Int =>
    val stringOrNonNegInt = Try(NonNegInt.unsafeFrom(i)) match {
      case Success(n) => Right(n)
      case Failure(t) => Left(t.getMessage)
    }
    NonNegInt.from(i) ?= stringOrNonNegInt
  }
} 
Example 52
Source File: RefTypeMonadErrorSpec.scala    From refined   with MIT License 5 votes vote down vote up
package eu.timepit.refined.cats

import _root_.cats.MonadError
import eu.timepit.refined.types.numeric.PosInt
import org.scalacheck.Prop._
import org.scalacheck.Properties
import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}

trait Decoder[A] {
  def decode(s: String): Either[String, A]
}

object Decoder {
  def apply[A](implicit d: Decoder[A]): Decoder[A] = d

  def instance[A](f: String => Either[String, A]): Decoder[A] =
    new Decoder[A] {
      override def decode(s: String): Either[String, A] = f(s)
    }

  implicit val decoderMonadError: MonadError[Decoder, String] =
    new MonadError[Decoder, String] {
      override def flatMap[A, B](fa: Decoder[A])(f: A => Decoder[B]): Decoder[B] =
        instance { s =>
          fa.decode(s) match {
            case Right(a)  => f(a).decode(s)
            case Left(err) => Left(err)
          }
        }

      override def tailRecM[A, B](a: A)(f: A => Decoder[Either[A, B]]): Decoder[B] = {
        @tailrec
        def step(s: String, a1: A): Either[String, B] =
          f(a1).decode(s) match {
            case Right(Right(b)) => Right(b)
            case Right(Left(a2)) => step(s, a2)
            case Left(err)       => Left(err)
          }

        instance(s => step(s, a))
      }

      override def raiseError[A](e: String): Decoder[A] =
        instance(_ => Left(e))

      override def handleErrorWith[A](fa: Decoder[A])(f: String => Decoder[A]): Decoder[A] =
        instance { s =>
          fa.decode(s) match {
            case Right(a)  => Right(a)
            case Left(err) => f(err).decode(s)
          }
        }

      override def pure[A](x: A): Decoder[A] =
        instance(_ => Right(x))
    }

  implicit val intDecoder: Decoder[Int] =
    instance(s =>
      Try(s.toInt) match {
        case Success(i) => Right(i)
        case Failure(t) => Left(t.getMessage)
      }
    )
}

class RefTypeMonadErrorSpec extends Properties("MonadError") {

  property("Decoder[Int]") = secure {
    Decoder[Int].decode("1") ?= Right(1)
  }

  property("derive Decoder[PosInt] via MonadError[Decoder, String]") = {
    // This import is needed because of https://github.com/scala/bug/issues/10753
    import Decoder.decoderMonadError
    import eu.timepit.refined.cats.derivation._

    val decoder = Decoder[PosInt]
    (decoder.decode("1") ?= Right(PosInt.unsafeFrom(1))) &&
    (decoder.decode("-1") ?= Left("Predicate failed: (-1 > 0)."))
  }
} 
Example 53
Source File: CounterEtlItem.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.loader.core

import org.apache.s2graph.counter.util.UnitConverter
import org.slf4j.LoggerFactory
import play.api.libs.json._
import scala.util.{Failure, Success, Try}

case class CounterEtlItem(ts: Long, service: String, action: String, item: String, dimension: JsValue, property: JsValue, useProfile: Boolean = false) {
  def toKafkaMessage: String = {
    s"$ts\t$service\t$action\t$item\t${dimension.toString()}\t${property.toString()}"
  }

  lazy val value = {
    (property \ "value").toOption match {
      case Some(JsNumber(n)) => n.longValue()
      case Some(JsString(s)) => s.toLong
      case None => 1L
      case _ => throw new Exception("wrong type")
    }
  }
}

object CounterEtlItem {
  val log = LoggerFactory.getLogger(this.getClass)

  def apply(line: String): Option[CounterEtlItem] = {
    Try {
      val Array(ts, service, action, item, dimension, property) = line.split('\t')
      CounterEtlItem(UnitConverter.toMillis(ts.toLong), service, action, item, Json.parse(dimension), Json.parse(property))
    } match {
      case Success(item) =>
        Some(item)
      case Failure(ex) =>
        log.error(">>> failed")
        log.error(s"${ex.toString}: $line")
        None
    }
  }
}
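A hypothetical round trip through the parser above (tab-separated line with JSON dimension and property columns):

// Illustrative only: parse one TSV line and read the extracted value.
val line = "1500000000\tmyService\tclick\titem-1\t{}\t{\"value\": 3}"
CounterEtlItem(line).foreach { item =>
  println(item.value)          // 3
  println(item.toKafkaMessage) // round-trips back to the TSV layout
}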
Example 54
Source File: Retry.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.util

import scala.annotation.tailrec
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}

object Retry {
  @tailrec
  def apply[T](n: Int, withSleep: Boolean = true, tryCount: Int = 0)(fn: => T): T = {
    Try { fn } match {
      case Success(x) => x
      case Failure(e) if e.isInstanceOf[RetryStopException] => throw e.getCause
      case _ if n > 1 =>
        // backoff
        if (withSleep) Thread.sleep(tryCount * 1000)
        apply(n - 1, withSleep, tryCount + 1)(fn)
      case Failure(e) => throw e
    }
  }
}

object RetryAsync {
  def apply[T](n: Int, withSleep: Boolean = true, tryCount: Int = 0)(fn: => Future[T])(implicit ex: ExecutionContext): Future[T] = {
    val promise = Promise[T]()
    fn onComplete {
      case Success(x) => promise.success(x)
      case Failure(e) if e.isInstanceOf[RetryStopException] => promise.failure(e.getCause)
      case _ if n > 1 =>
        // backoff, then chain the retried attempt into this promise
        // (without completeWith, the recursive Future would be discarded and the promise never completed)
        if (withSleep) Thread.sleep(tryCount * 1000)
        promise.completeWith(apply(n - 1, withSleep, tryCount + 1)(fn))
      case Failure(e) => promise.failure(e)
    }
    promise.future
  }
}

class RetryStopException(message: String, cause: Throwable)
  extends Exception(message, cause) {

  def this(message: String) = this(message, null)

  def this(cause: Throwable) = this(cause.toString, cause)
} 
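A hypothetical call site for the helpers above; wrapping a cause in RetryStopException short-circuits the remaining attempts:

// Illustrative only: retry a flaky computation up to 3 times with linear backoff.
val value: Int = Retry(3) {
  val n = scala.util.Random.nextInt(10)
  if (n < 5) throw new RuntimeException("transient failure") // will be retried
  n
}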
Example 55
Source File: Server.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.http

import java.time.Instant

import scala.language.postfixOps
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.S2Graph
import org.slf4j.LoggerFactory

object Server extends App
  with S2GraphTraversalRoute
  with S2GraphAdminRoute
  with S2GraphMutateRoute
  with S2GraphQLRoute {

  implicit val system: ActorSystem = ActorSystem("S2GraphHttpServer")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContext = system.dispatcher

  val config = ConfigFactory.load()

  override val s2graph = new S2Graph(config)
  override val logger = LoggerFactory.getLogger(this.getClass)

  val port = sys.props.get("http.port").fold(8000)(_.toInt)
  val interface = sys.props.get("http.interface").fold("0.0.0.0")(identity)

  val startAt = System.currentTimeMillis()

  def uptime = System.currentTimeMillis() - startAt

  def serverHealth = s"""{ "port": ${port}, "interface": "${interface}", "started_at": "${Instant.ofEpochMilli(startAt)}", "uptime": "${uptime} millis" }"""

  def health = HttpResponse(status = StatusCodes.OK, entity = HttpEntity(ContentTypes.`application/json`, serverHealth))

  // Allows you to determine routes to expose according to external settings.
  lazy val routes: Route = concat(
    pathPrefix("graphs")(traversalRoute),
    pathPrefix("mutate")(mutateRoute),
    pathPrefix("admin")(adminRoute),
    pathPrefix("graphql")(graphqlRoute),
    get(complete(health))
  )

  val binding: Future[Http.ServerBinding] = Http().bindAndHandle(routes, interface, port)
  binding.onComplete {
    case Success(bound) => logger.info(s"Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/")
    case Failure(e) => logger.error(s"Server could not start!", e)
  }

  scala.sys.addShutdownHook {
    s2graph.shutdown()
    system.terminate()
    logger.info("System terminated")
  }

  Await.result(system.whenTerminated, Duration.Inf)
} 
Example 56
Source File: WsConnection.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
            |         URI = $uri
            |  Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit           = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit     = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
} 
Example 57
Source File: BlockchainCache.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


abstract class BlockchainCache[K <: AnyRef, V <: AnyRef](loader: K => Future[V], expiration: Option[Duration], invalidationPredicate: V => Boolean)(
    implicit ec: ExecutionContext)
    extends ScorexLogging {

  lazy private val cache: LoadingCache[K, Future[V]] = {
    val builder = CacheBuilder.newBuilder
    expiration
      .fold(builder)(builder.expireAfterWrite)
      .build {
        new CacheLoader[K, Future[V]] {
          override def load(key: K): Future[V] = loader(key) andThen {
            case Success(value) if invalidationPredicate(value) =>
              cache.invalidate(key) // value may persist for a little longer than expected due to the fact that all the threads in the EC may be busy
            case Failure(exception) => log.error(s"Error while loading the value for key $key", exception); cache.invalidate(key)
          }
        }
      }
  }

  def get(key: K): Future[V] = cache.get(key)

  def put(key: K, value: Future[V]): Unit = cache.put(key, value)
}

object BlockchainCache {

  def noCustomInvalidationLogic[V](value: V): Boolean = false
} 
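
For illustration, a hypothetical concrete cache built on this base class: it caches account balances for one minute and invalidates zero values so they are re-fetched on the next read (all names here are assumptions, not part of the matcher code):

class BalanceCache(load: String => Future[Long])(implicit ec: ExecutionContext)
    extends BlockchainCache[String, java.lang.Long](
      loader = address => load(address).map(java.lang.Long.valueOf), // box the value: V must be <: AnyRef
      expiration = Some(Duration.ofMinutes(1)),
      invalidationPredicate = _ == 0L // zero balances are dropped from the cache
    )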
Example 58
Source File: package.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.domain

import scala.util.{Failure, Success, Try}

package object utils {

  private val BytesMaxValue  = 256
  private val Base58MaxValue = 58

  private val BytesLog = math.log(BytesMaxValue)
  private val BaseLog  = math.log(Base58MaxValue)

  def base58Length(byteArrayLength: Int): Int = math.ceil(BytesLog / BaseLog * byteArrayLength).toInt

  implicit class EitherExt2[A, B](ei: Either[A, B]) {

    def explicitGet(): B = ei match {
      case Left(value)  => throw makeException(value)
      case Right(value) => value
    }

    def foldToTry: Try[B] = ei.fold(
      left => Failure(makeException(left)),
      right => Success(right)
    )

    @inline
    private[this] def makeException(value: Any): Throwable = value match {
      case err: Throwable => err
      case _              => new RuntimeException(value.toString)
    }
  }
} 
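
A quick sketch of these helpers in use (REPL-style; the values are chosen for illustration):

import com.wavesplatform.dex.domain.utils._

Right[String, Int](42).explicitGet() // 42; a Left would be thrown as a RuntimeException
Left[String, Int]("boom").foldToTry  // Failure(java.lang.RuntimeException: boom)
base58Length(32)                     // 44 -- upper bound on the Base58 encoding of a 32-byte array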
Example 59
Source File: KeyPair.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.domain.account

import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.bytes.ByteStr._
import com.wavesplatform.dex.domain.bytes.codec.Base58
import com.wavesplatform.dex.domain.crypto
import com.wavesplatform.dex.domain.error.ValidationError.GenericError
import play.api.libs.json.{Format, Json, Writes}

import scala.util.{Failure, Success}

final case class KeyPair(seed: ByteStr) {
  lazy val (PrivateKey(privateKey), PublicKey(publicKey)) = crypto.createKeyPair(seed)
}

object KeyPair {

  def fromSeed(base58: String): Either[GenericError, KeyPair] = Base58.tryDecodeWithLimit(base58) match {
    case Success(x) => Right(KeyPair(x))
    case Failure(e) => Left(GenericError(s"Unable to get a private key from the seed '$base58': ${e.getMessage}"))
  }

  implicit class KeyPairImplicitOps(private val kp: KeyPair) extends AnyVal {
    def toAddress: Address = PublicKey.toAddress(kp)
  }

  implicit def toPublicKey(kp: KeyPair): PublicKey   = kp.publicKey
  implicit def toPrivateKey(kp: KeyPair): PrivateKey = kp.privateKey
  implicit def toAddress(keyPair: KeyPair): Address  = keyPair.toAddress

  implicit val jsonFormat: Format[KeyPair] = Format(
    byteStrFormat.map(KeyPair(_)),
    Writes(v => Json.obj("seed" -> Base58.encode(v.seed), "publicKey" -> v.publicKey, "privateKey" -> v.privateKey))
  )
} 
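
Sketch of deriving a key pair from a seed (the seed string is an arbitrary Base58 value used for illustration):

KeyPair.fromSeed("J5tUQ") match {
  case Right(kp) => println(s"address: ${kp.toAddress}, public key: ${kp.publicKey}")
  case Left(err) => println(s"invalid seed: $err")
}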
Example 60
Source File: AssetPair.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.domain.asset

import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves}
import com.wavesplatform.dex.domain.bytes.{ByteStr, deser}
import com.wavesplatform.dex.domain.validation.Validation
import com.wavesplatform.dex.domain.validation.Validation.booleanOperators
import io.swagger.annotations.{ApiModel, ApiModelProperty}
import net.ceedubs.ficus.readers.ValueReader
import play.api.libs.functional.syntax._
import play.api.libs.json._

import scala.util.{Failure, Success, Try}

@ApiModel(
  description = """A pair of assets sorted by two rules:
      1. A price asset is chosen by a priority from priceAssets of /matcher/settings;
      2. If both assets are not present among priceAssets, they are sorted lexicographically: price asset bytes < amount asset bytes""")
case class AssetPair(@ApiModelProperty(
                       value = "Base58 encoded amount asset ID. Waves is used if field isn't specified",
                       dataType = "string",
                       example = "8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS",
                     ) amountAsset: Asset,
                     @ApiModelProperty(
                       value = "Base58 encoded price asset ID. Waves is used if field isn't specified",
                       dataType = "string",
                       example = "DG2xFkPdDwKUoBkzGAhQtLpSGzfXLiCYPEzeKH2Ad24p",
                     ) priceAsset: Asset) {

  @ApiModelProperty(hidden = true)
  lazy val priceAssetStr: String = priceAsset.toString

  @ApiModelProperty(hidden = true)
  lazy val amountAssetStr: String = amountAsset.toString

  def key: String = amountAssetStr + "-" + priceAssetStr

  override def toString: String = key

  def isValid: Validation = (amountAsset != priceAsset) :| "Invalid AssetPair"
  def bytes: Array[Byte]  = amountAsset.byteRepr ++ priceAsset.byteRepr

  def reverse: AssetPair = AssetPair(priceAsset, amountAsset)

  def assets: Set[Asset] = Set(amountAsset, priceAsset)
}

object AssetPair {

  def extractAsset(a: String): Try[Asset] = a match {
    case Asset.WavesName => Success(Waves)
    case other           => ByteStr.decodeBase58(other).map(IssuedAsset)
  }

  def extractAssetPair(s: String): Try[AssetPair] = s.split('-') match {
    case Array(amtAssetStr, prcAssetStr) =>
      AssetPair.createAssetPair(amtAssetStr, prcAssetStr).recoverWith {
        case e => Failure(new Exception(s"$s (${e.getMessage})", e))
      }

    case xs => Failure(new Exception(s"$s (incorrect assets count, expected 2 but got ${xs.length})"))
  }

  def createAssetPair(amountAsset: String, priceAsset: String): Try[AssetPair] =
    for {
      a1 <- extractAsset(amountAsset)
      a2 <- extractAsset(priceAsset)
    } yield AssetPair(a1, a2)

  def fromBytes(xs: Array[Byte]): AssetPair = {
    val (amount, offset) = deser.parseByteArrayOption(xs, 0, Asset.AssetIdLength)
    val (price, _)       = deser.parseByteArrayOption(xs, offset, Asset.AssetIdLength)
    AssetPair(
      Asset.fromCompatId(amount.map(ByteStr(_))),
      Asset.fromCompatId(price.map(ByteStr(_)))
    )
  }

  implicit val assetPairReader: ValueReader[AssetPair] = { (cfg, path) =>
    val source = cfg.getString(path)
    extractAssetPair(source).fold(e => throw e, identity)
  }

  implicit val assetPairFormat: OFormat[AssetPair] = (
    (JsPath \ "amountAsset").formatWithDefault[Asset](Waves) and (JsPath \ "priceAsset").formatWithDefault[Asset](Waves)
  )(AssetPair.apply, Function.unlift(AssetPair.unapply))

  val assetPairKeyAsStringFormat: Format[AssetPair] = Format(
    fjs = Reads {
      case JsString(x) => AssetPair.extractAssetPair(x).fold(e => JsError(e.getMessage), JsSuccess(_))
      case x           => JsError(JsPath, s"Expected a string, but got ${x.toString().take(10)}...")
    },
    tjs = Writes { x =>
      JsString(x.key)
    }
  )
} 
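
A round-trip sketch (the asset ID is the example value from the annotations above; "WAVES" is assumed to match Asset.WavesName):

val parsed = AssetPair.extractAssetPair("8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS-WAVES")
parsed.map(_.key)     // Success("8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS-WAVES")
parsed.map(_.reverse) // the same pair with price and amount assets swapped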
Example 61
Source File: WsConnection.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher
  private implicit val materializer: Materializer = Materializer(system)
  private val wsHandlerRef          = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted
  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
} 
Example 62
Source File: package.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it

import com.wavesplatform.dex.domain.account.AddressScheme
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.wavesj.Transaction
import com.wavesplatform.wavesj.json.WavesJsonMapper
import com.wavesplatform.wavesj.transactions.ExchangeTransaction
import play.api.libs.json._
import play.api.libs.json.jackson.PlayJsonModule

import scala.util.{Failure, Success, Try}

package object json {

  private val mapper = new WavesJsonMapper(AddressScheme.current.chainId)
  mapper.registerModule(new PlayJsonModule(JsonParserSettings()))

  private def wavesJDeserializeTx(json: JsValue): Transaction = mapper.readValue(json.toString, classOf[Transaction])

  implicit val transactionFormat: Format[Transaction] = Format[Transaction](
    Reads { json =>
      Try(wavesJDeserializeTx(json)) match {
        case Success(x) => JsSuccess(x)
        case Failure(e) => JsError(e.getMessage)
      }
    },
    Writes(tx => Json.parse(mapper.writeValueAsString(tx)))
  )

  implicit val byteStrFormat: Format[ByteStr] = Format(
    Reads {
      case JsString(str) =>
        ByteStr.decodeBase58(str) match {
          case Success(x) => JsSuccess(x)
          case Failure(e) => JsError(e.getMessage)
        }

      case _ => JsError("Can't read ByteStr")
    },
    Writes(x => JsString(x.toString))
  )

  implicit val exchangeTxReads: Reads[ExchangeTransaction] = transactionFormat.map(_.asInstanceOf[ExchangeTransaction])

  implicit val orderWrites: Writes[Order] = Writes(_.json())

  implicit val assetPairFormat: Format[AssetPair] = AssetPair.assetPairFormat

  implicit val assetRatesReads: Reads[Map[Asset, Double]] = Reads { json =>
    json.validate[Map[String, Double]].map { assetRates =>
      assetRates.map { case (assetStr, rateValue) => AssetPair.extractAsset(assetStr).get -> rateValue }
    }
  }

  implicit val assetBalancesReads: Reads[Map[Asset, Long]] = Reads.map[Long].map { assetBalances =>
    assetBalances.map { case (assetStr, balanceValue) => AssetPair.extractAsset(assetStr).get -> balanceValue }
  }

  implicit val assetPairOffsetsReads: Reads[Map[AssetPair, Long]] = Reads { json =>
    json.validate[Map[String, Long]].map {
      _.map {
        case (assetPairStr, offset) =>
          val assetPairStrArr = assetPairStr.split("-")
          val assetPair = (
            assetPairStrArr match {
              case Array(amtAssetStr, prcAssetStr) => AssetPair.createAssetPair(amtAssetStr, prcAssetStr)
              case _                               => throw new Exception(s"$assetPairStr (incorrect assets count, expected 2 but got ${assetPairStrArr.size})")
            }
          ) fold (ex => throw new Exception(s"$assetPairStr (${ex.getMessage})"), identity)
          assetPair -> offset
      }
    }
  }
} 
Example 63
Source File: ResponseParsers.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.sttp

import com.google.common.primitives.Longs
import com.softwaremill.sttp.{DeserializationError, ResponseAs, MonadError => _, _}
import com.typesafe.config.{Config, ConfigFactory}
import play.api.libs.json.JsError

import scala.util.{Failure, Success, Try}

object ResponseParsers {

  val asUtf8String: ResponseAs[String, Nothing] = asString("UTF-8")

  def asLong: ResponseAs[Either[DeserializationError[JsError], Long], Nothing] =
    asUtf8String.map { string =>
      val r = Longs.tryParse(string)
      if (r == null) Left(DeserializationError[JsError](string, JsError("Can't parse Long"), "Can't parse Long"))
      else Right(r)
    }

  def asConfig: ResponseAs[Either[DeserializationError[JsError], Config], Nothing] =
    asUtf8String.map { string =>
      Try(ConfigFactory.parseString(string)) match {
        case Success(r) => Right(r)
        case Failure(e) => Left(DeserializationError[JsError](string, JsError("Can't parse Config"), s"Can't parse Config: ${e.getMessage}"))
      }
    }
} 
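
Wiring one of these parsers into an sttp v1 request might look like this (the endpoint is a hypothetical example):

import com.softwaremill.sttp._

implicit val backend: SttpBackend[Id, Nothing] = HttpURLConnectionBackend()

val response = sttp
  .get(uri"http://localhost:6886/matcher/settings") // hypothetical endpoint
  .response(ResponseParsers.asConfig)
  .send()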
Example 64
Source File: InformativeTestStart.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.test

import java.time.{LocalDateTime, ZoneId}

import com.wavesplatform.dex.it.api.BaseContainersKit
import mouse.any._
import org.scalatest.{Args, Status, Suite}

import scala.util.{Failure, Success}

trait InformativeTestStart extends Suite { self: BaseContainersKit =>

  override protected def runTest(testName: String, args: Args): Status = {

    def print(text: String): Unit = writeGlobalLog(s"---------- [${LocalDateTime.now(ZoneId.of("UTC"))}] $text ----------")

    print(s"Test '$testName' started")

    super.runTest(testName, args) unsafeTap {
      _.whenCompleted {
        case Success(r) => print(s"Test '$testName' ${if (r) "succeeded" else "failed"}")
        case Failure(e) => print(s"Test '$testName' failed with exception '${e.getClass.getSimpleName}'")
      }
    }
  }

  protected def writeGlobalLog(x: String): Unit = {
    log.debug(x)
    knownContainers.get().foreach { _.printDebugMessage(x) }
  }
} 
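
A hypothetical suite wiring (the base spec class is an assumption; the trait itself only requires BaseContainersKit):

import org.scalatest.freespec.AnyFreeSpec

class MatcherSmokeTestSuite
    extends AnyFreeSpec
    with BaseContainersKit
    with InformativeTestStart {
  // every test start and outcome is now echoed to the global log
  // and to all known containers
}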
Example 65
Source File: EmbeddedCassandra.scala    From phantom-activator-template   with Apache License 2.0 5 votes vote down vote up
package controllers

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.slf4j.Logger

import scala.concurrent.blocking
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}


// The enclosing object declaration and the `started` flag were missing from this
// listing; they are reconstructed here (object name assumed) so the example compiles.
object EmbeddedCassandra {

  private val started = new AtomicBoolean(false)

  def start(logger: Logger, config: Option[File] = None, timeout: Option[Int] = None): Unit = {
    this.synchronized {
      if (started.compareAndSet(false, true)) {
        blocking {
          val configFile = config.map(_.toURI.toString) getOrElse EmbeddedCassandraServerHelper.DEFAULT_CASSANDRA_YML_FILE
          System.setProperty("cassandra.config", configFile)
          Try {
            EmbeddedCassandraServerHelper.mkdirs()
          } match {
            case Success(_) => logger.info("Successfully created directories for embedded Cassandra.")
            case Failure(NonFatal(e)) =>
              logger.error(s"Error creating embedded Cassandra directories: ${e.getMessage}")
          }

          (config, timeout) match {
            case (Some(file), None) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                EmbeddedCassandraServerHelper.DEFAULT_STARTUP_TIMEOUT
              )
            case (Some(file), Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                time
              )

            case (None, Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with default configuration and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(time)
            case (None, None) =>
              logger.info("Starting Cassandra in embedded mode with default configuration.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra()
              logger.info("Successfully started embedded Cassandra")
          }
        }
      }
      else {
        logger.info("Embedded Cassandra has already been started")
      }
    }
  }


  def cleanup(logger: Logger): Unit = {
    this.synchronized {
      if (started.compareAndSet(true, false)) {
        logger.info("Cleaning up embedded Cassandra")
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
      } else {
        logger.info("Cassandra is not running, not cleaning up")
      }
    }
  }
} 
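
Typical lifecycle in a test run (a sketch; the logger name and timeout are arbitrary):

val logger = org.slf4j.LoggerFactory.getLogger("embedded-cassandra")

EmbeddedCassandra.start(logger, config = None, timeout = Some(30000))
// ... run suites against the embedded instance ...
EmbeddedCassandra.cleanup(logger)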
Example 66
Source File: MetricsReporter.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.metrics

import akka.actor.DynamicAccess
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.concurrent.duration.Duration
import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait MetricsReporter {
  def setDynamoDBClientPutItemDuration(duration: Duration): Unit
  def setDynamoDBClientBatchWriteItemDuration(duration: Duration): Unit
  def setDynamoDBClientUpdateItemDuration(duration: Duration): Unit
  def setDynamoDBClientDeleteItemDuration(duration: Duration): Unit
  def setDynamoDBClientQueryDuration(duration: Duration): Unit
  def setDynamoDBClientScanDuration(duration: Duration): Unit
}

object MetricsReporter {

  class None(pluginConfig: PluginConfig) extends MetricsReporter {
    override def setDynamoDBClientPutItemDuration(duration: Duration): Unit        = {}
    override def setDynamoDBClientBatchWriteItemDuration(duration: Duration): Unit = {}
    override def setDynamoDBClientUpdateItemDuration(duration: Duration): Unit     = {}
    override def setDynamoDBClientDeleteItemDuration(duration: Duration): Unit     = {}
    override def setDynamoDBClientQueryDuration(duration: Duration): Unit          = {}
    override def setDynamoDBClientScanDuration(duration: Duration): Unit           = {}
  }

}

trait MetricsReporterProvider {

  def create: Option[MetricsReporter]

}

object MetricsReporterProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): MetricsReporterProvider = {
    val className = pluginConfig.metricsReporterProviderClassName
    dynamicAccess
      .createInstanceFor[MetricsReporterProvider](
        className,
        Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig)
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize MetricsReporterProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends MetricsReporterProvider {

    def create: Option[MetricsReporter] = {
      pluginConfig.metricsReporterClassName.map { className =>
        dynamicAccess
          .createInstanceFor[MetricsReporter](
            className,
            Seq(classOf[PluginConfig] -> pluginConfig)
          ) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize MetricsReporter", Some(ex))
        }
      }
    }

  }
} 
Example 67
Source File: DnsResolverProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.amazonaws.DnsResolver
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait DnsResolverProvider {
  def create: Option[DnsResolver]
}

object DnsResolverProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): DnsResolverProvider = {
    val className = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.dnsResolverProviderClassName
    dynamicAccess
      .createInstanceFor[DnsResolverProvider](
        className,
        Seq(
          classOf[DynamicAccess] -> dynamicAccess,
          classOf[PluginConfig]  -> pluginConfig
        )
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize DnsResolverProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends DnsResolverProvider {

    override def create: Option[DnsResolver] = {
      val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.dnsResolverClassName
      classNameOpt.map { className =>
        dynamicAccess
          .createInstanceFor[DnsResolver](
            className,
            Seq(
              classOf[PluginConfig] -> pluginConfig
            )
          ) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize DnsResolver", Some(ex))
        }
      }
    }
  }

} 
Example 68
Source File: ExecutionInterceptorsProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException
import software.amazon.awssdk.core.interceptor.ExecutionInterceptor

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait ExecutionInterceptorsProvider {
  def create: Seq[ExecutionInterceptor]
}

object ExecutionInterceptorsProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): ExecutionInterceptorsProvider = {
    val className = pluginConfig.clientConfig.v2ClientConfig.executionInterceptorsProviderClassName
    dynamicAccess
      .createInstanceFor[ExecutionInterceptorsProvider](
        className,
        Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig)
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize ExecutionInterceptorsProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends ExecutionInterceptorsProvider {

    override def create: Seq[ExecutionInterceptor] = {
      val classNames = pluginConfig.clientConfig.v2ClientConfig.executionInterceptorClassNames
      classNames.map { className =>
        dynamicAccess
          .createInstanceFor[ExecutionInterceptor](className, Seq(classOf[PluginConfig] -> pluginConfig)) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize ExecutionInterceptor", Some(ex))
        }
      }
    }
  }

} 
Example 69
Source File: RetryPolicyProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.amazonaws.retry.{ PredefinedRetryPolicies, RetryPolicy }
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait RetryPolicyProvider {
  def create: RetryPolicy
}

object RetryPolicyProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): Option[RetryPolicyProvider] = {
    val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.retryPolicyProviderClassName
    classNameOpt.map { className =>
      dynamicAccess
        .createInstanceFor[RetryPolicyProvider](
          className,
          Seq(
            classOf[DynamicAccess] -> dynamicAccess,
            classOf[PluginConfig]  -> pluginConfig
          )
        ) match {
        case Success(value) => value
        case Failure(ex) =>
          throw new PluginException("Failed to initialize RetryPolicyProvider", Some(ex))
      }
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RetryPolicyProvider {

    override def create: RetryPolicy = {
      pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.maxErrorRetry
        .fold(PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicy) { maxErrorRetry =>
          PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicyWithCustomMaxRetries(maxErrorRetry)
        }
    }

  }

} 
Example 70
Source File: RequestHandlersProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.amazonaws.handlers.RequestHandler2
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait RequestHandlersProvider {
  def create: Seq[RequestHandler2]
}

object RequestHandlersProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): RequestHandlersProvider = {
    val className = pluginConfig.clientConfig.v1ClientConfig.requestHandlersProviderClassName
    dynamicAccess
      .createInstanceFor[RequestHandlersProvider](
        className,
        Seq(
          classOf[DynamicAccess] -> dynamicAccess,
          classOf[PluginConfig]  -> pluginConfig
        )
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize RequestHandlersProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RequestHandlersProvider {

    override def create: Seq[RequestHandler2] = {
      val classNames = pluginConfig.clientConfig.v1ClientConfig.requestHandlerClassNames
      classNames.map { className =>
        dynamicAccess
          .createInstanceFor[RequestHandler2](
            className,
            Seq(
              classOf[DynamicAccess] -> dynamicAccess,
              classOf[PluginConfig]  -> pluginConfig
            )
          ) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize RequestHandler2", Some(ex))
        }
      }
    }
  }

} 
Example 71
Source File: MonitoringListenerProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.amazonaws.monitoring.MonitoringListener
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait MonitoringListenerProvider {
  def create: Option[MonitoringListener]
}

object MonitoringListenerProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): MonitoringListenerProvider = {
    val className = pluginConfig.clientConfig.v1ClientConfig.monitoringListenerProviderClassName
    dynamicAccess
      .createInstanceFor[MonitoringListenerProvider](
        className,
        Seq(
          classOf[DynamicAccess] -> dynamicAccess,
          classOf[PluginConfig]  -> pluginConfig
        )
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize MonitoringListenerProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends MonitoringListenerProvider {

    override def create: Option[MonitoringListener] = {
      val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.monitoringListenerClassName
      classNameOpt.map { className =>
        dynamicAccess
          .createInstanceFor[MonitoringListener](
            className,
            Seq(
              classOf[DynamicAccess] -> dynamicAccess,
              classOf[PluginConfig]  -> pluginConfig
            )
          ) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize MonitoringListener", Some(ex))
        }
      }
    }
  }

} 
Example 72
Source File: SecureRandomProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import java.security.SecureRandom

import akka.actor.DynamicAccess
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait SecureRandomProvider {
  def create: SecureRandom
}

object SecureRandomProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): SecureRandomProvider = {
    val className = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.secureRandomProviderClassName
    dynamicAccess
      .createInstanceFor[SecureRandomProvider](
        className,
        Seq(
          classOf[DynamicAccess] -> dynamicAccess,
          classOf[PluginConfig]  -> pluginConfig
        )
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize SecureRandomProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends SecureRandomProvider {
    override def create: SecureRandom = new SecureRandom()
  }
} 
Example 73
Source File: RequestMetricCollectorProvider.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1

import akka.actor.DynamicAccess
import com.amazonaws.metrics.RequestMetricCollector
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException

import scala.collection.immutable._
import scala.util.{ Failure, Success }

trait RequestMetricCollectorProvider {
  def create: Option[RequestMetricCollector]
}

object RequestMetricCollectorProvider {

  def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): RequestMetricCollectorProvider = {
    val className = pluginConfig.clientConfig.v1ClientConfig.requestMetricCollectorProviderClassName
    dynamicAccess
      .createInstanceFor[RequestMetricCollectorProvider](
        className,
        Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig)
      ) match {
      case Success(value) => value
      case Failure(ex) =>
        throw new PluginException("Failed to initialize RequestMetricCollectorProvider", Some(ex))
    }
  }

  final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RequestMetricCollectorProvider {

    override def create: Option[RequestMetricCollector] = {
      val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.requestMetricCollectorClassName
      classNameOpt.map { className =>
        dynamicAccess
          .createInstanceFor[RequestMetricCollector](
            className,
            Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig)
          ) match {
          case Success(value) => value
          case Failure(ex) =>
            throw new PluginException("Failed to initialize RequestMetricCollector", Some(ex))
        }
      }
    }
  }

} 
Example 74
Source File: ByteArrayJournalSerializer.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.serialization

import akka.persistence.PersistentRepr
import akka.serialization.Serialization
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

import scala.util.{ Failure, Success }

class ByteArrayJournalSerializer(serialization: Serialization, separator: String)
    extends FlowPersistentReprSerializer[JournalRow] {

  override def serialize(
      persistentRepr: PersistentRepr,
      tags: Set[String],
      index: Option[Int]
  ): Either[Throwable, JournalRow] = {
    serialization
      .serialize(persistentRepr)
      .map(
        JournalRow(
          PersistenceId(persistentRepr.persistenceId),
          SequenceNumber(persistentRepr.sequenceNr),
          persistentRepr.deleted,
          _,
          System.currentTimeMillis(),
          encodeTags(tags, separator)
        )
      ) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }

  override def deserialize(journalRow: JournalRow): Either[Throwable, (PersistentRepr, Set[String], Long)] = {
    serialization
      .deserialize(journalRow.message, classOf[PersistentRepr])
      .map((_, decodeTags(journalRow.tags, separator), journalRow.ordering)) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }
} 
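
A sketch of the serializer in use (the SerializationExtension wiring and the sample PersistentRepr values are illustrative):

import akka.actor.ActorSystem
import akka.serialization.SerializationExtension

val system     = ActorSystem("example")
val serializer = new ByteArrayJournalSerializer(SerializationExtension(system), separator = ",")

val row: Either[Throwable, JournalRow] =
  serializer.serialize(PersistentRepr("payload", sequenceNr = 1L, persistenceId = "pid-1"), Set("tag-1"), None)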
Example 75
Source File: FlowPersistentReprSerializer.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.serialization

import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Flow

import scala.util.{ Failure, Success, Try }

trait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] {

  def deserializeFlow: Flow[T, (PersistentRepr, Set[String], Long), NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(r) => r
      case Left(ex) => throw ex
    }
  }

  def deserializeFlowWithoutTags: Flow[T, PersistentRepr, NotUsed] = {
    deserializeFlow.map(keepPersistentRepr)
  }

  // ---

  def deserializeFlowAsEither: Flow[T, Either[Throwable, (PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize)
  }

  def deserializeFlowWithoutTagsAsEither: Flow[T, Either[Throwable, PersistentRepr], NotUsed] = {
    deserializeFlowAsEither.map {
      case Right(v) => Right(keepPersistentRepr(v))
      case Left(ex) => Left(ex)
    }
  }

  // ---

  def deserializeFlowAsTry: Flow[T, Try[(PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(v) => Success(v)
      case Left(ex) => Failure(ex)
    }
  }

  def deserializeFlowWithoutTagsAsTry: Flow[T, Try[PersistentRepr], NotUsed] = {
    deserializeFlowAsTry.map(_.map(keepPersistentRepr))
  }

  private def keepPersistentRepr(tup: (PersistentRepr, Set[String], Long)): PersistentRepr = tup match {
    case (repr, _, _) => repr
  }

} 
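
Plugging the Try-based flow into a stream that tolerates undeserializable rows might look like this (journalRowSource and serializer are assumed to exist):

import akka.stream.scaladsl.Source

val reprs: Source[PersistentRepr, NotUsed] =
  journalRowSource                                  // hypothetical Source[JournalRow, NotUsed]
    .via(serializer.deserializeFlowWithoutTagsAsTry)
    .collect { case Success(repr) => repr }         // rows that failed to deserialize are dropped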
Example 76
Source File: ByteArraySnapshotSerializer.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.serialization

import akka.persistence.SnapshotMetadata
import akka.persistence.serialization.Snapshot
import akka.serialization.Serialization
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao.SnapshotRow

import scala.util.{ Failure, Success }

class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] {

  override def serialize(
      metadata: SnapshotMetadata,
      snapshot: Any
  ): Either[Throwable, SnapshotRow] = {
    serialization
      .serialize(Snapshot(snapshot))
      .map(
        SnapshotRow(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr), metadata.timestamp, _)
      ) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }

  override def deserialize(snapshotRow: SnapshotRow): Either[Throwable, (SnapshotMetadata, Any)] = {
    serialization
      .deserialize(snapshotRow.snapshot, classOf[Snapshot])
      .map(snapshot => {
        val snapshotMetadata =
          SnapshotMetadata(snapshotRow.persistenceId.asString, snapshotRow.sequenceNumber.value, snapshotRow.created)
        (snapshotMetadata, snapshot.data)
      }) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }

} 
Example 77
Source File: ByteArraySnapshotSerializer.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao

import akka.persistence.SnapshotMetadata
import akka.persistence.serialization.Snapshot
import akka.serialization.Serialization
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

import scala.util.{ Failure, Success }

trait SnapshotSerializer[T] {
  def serialize(metadata: SnapshotMetadata, snapshot: Any): Either[Throwable, T]

  def deserialize(t: T): Either[Throwable, (SnapshotMetadata, Any)]
}

class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] {

  override def serialize(
      metadata: SnapshotMetadata,
      snapshot: Any
  ): Either[Throwable, SnapshotRow] = {
    serialization
      .serialize(Snapshot(snapshot))
      .map(
        SnapshotRow(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr), metadata.timestamp, _)
      ) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }

  override def deserialize(snapshotRow: SnapshotRow): Either[Throwable, (SnapshotMetadata, Any)] = {
    serialization
      .deserialize(snapshotRow.snapshot, classOf[Snapshot])
      .map(snapshot => {
        val snapshotMetadata =
          SnapshotMetadata(snapshotRow.persistenceId.asString, snapshotRow.sequenceNumber.value, snapshotRow.created)
        (snapshotMetadata, snapshot.data)
      }) match {
      case Success(value) => Right(value)
      case Failure(ex)    => Left(ex)
    }
  }
} 
Example 78
Source File: Http.scala    From AI   with Apache License 2.0 5 votes vote down vote up
package com.bigchange.http

import com.bigchange.log.CLogger
import dispatch.Defaults._
import dispatch._

import scala.collection.mutable
import scala.util.{Failure, Success}


// The enclosing object declaration was missing from this listing; the object name
// and the CLogger mixin are assumed, and `parse` is typed as a callback based on
// the commented-out `parse(content)` call below.
object HttpUtils extends CLogger {

  def post(strUrl: String, parameters: mutable.HashMap[String, String], parse: String => Unit): Unit = {

    val post = url(strUrl) << parameters
    val response : Future[String] = Http(post OK as.String)

    response onComplete {
      case Success(content) =>
        // parse(content)
        println("post Success content:"+content)
      case Failure(t) =>
        println("post Failure content:"+t)
    }
  }
} 
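
With the reconstructed signature above, invoking the helper might look like this (the URL and parameters are placeholders):

HttpUtils.post(
  "http://example.com/api", // placeholder URL
  mutable.HashMap("key" -> "value"),
  content => println(s"parsed: $content")
)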
Example 79
Source File: FlowErrorTest.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.flow

import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.scaladsl._
import akka.stream.testkit.scaladsl._
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

class FlowErrorTest extends TestSpec {

  "Error stream" should "" in {
  }

  //  it should "stop the stream" in {
  //    Source(Future[String](throw new RuntimeException("Test")))
  //      .withAttributes(supervisionStrategy(resumingDecider))
  //      .map { x => println(x); x }
  //      .runWith(TestSink.probe[String])
  //      .request(1)
  //      .expectError()
  //  }

  it should "resume with no result for the failed future" in {
    val t = new RuntimeException("Test")
    Source(List(1, 2, 3))
      .log("before")
      .mapAsync(3) { x ⇒
        Future {
          if (x == 2) throw t else x
        }
      }
      .withAttributes(supervisionStrategy(resumingDecider))
      .log("after")
      .runWith(TestSink.probe[Int])
      .request(4)
      // the resuming decider drops the element whose future failed,
      // so only 1 and 3 reach the probe
      .expectNext(1)
      .expectNext(3)
      .expectComplete()
  }
} 
Example 80
Source File: SimpleScalaRiakDataframesExample.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.examples.dataframes

import com.basho.riak.client.core.query.indexes.LongIntIndex
import com.basho.riak.client.core.query.Namespace
import com.basho.riak.spark._
import com.basho.riak.spark.util.RiakObjectConversionUtil
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import scala.reflect.runtime.universe
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{ Failure, Success }
import com.basho.riak.client.core.query.RiakObject
import com.basho.riak.client.api.RiakClient
import com.basho.riak.client.core.query.Location
import com.basho.riak.spark.rdd.RiakFunctions


object SimpleScalaRiakDataframesExample {
  private val bucketName = "users"

  case class UserData(user_id: String, name: String, age: Int, category: String)

  val testData = Seq(
    UserData("u1", "Ben", 23, "CategoryA"),
    UserData("u2", "Clair", 19, "CategoryB"),
    UserData("u3", "John", 21, null),
    UserData("u4", "Chris", 50, "Categoryc"),
    UserData("u5", "Mary", 15, "CategoryB"),
    UserData("u6", "George", 31, "CategoryC")
  )

  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("Riak Spark Dataframes Example")

    setSparkOpt(sparkConf, "spark.master", "local")
    setSparkOpt(sparkConf, "spark.riak.connection.host", "127.0.0.1:8087")

    val sc = new SparkContext(sparkConf)

    // Work with clear bucket
    clearBucket(sparkConf)

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    // To enable toDF()
    import sqlContext.implicits._

    println(s"Saving data to Riak:\n$testData")

    // Save the test data to the Riak bucket
    sc.parallelize(testData).map {
      line =>
        val obj = RiakObjectConversionUtil.to(line)
        // RiakObjectConversionUtil.to() sets content type to text/plain if String is passed
        // Overwriting content type to application/json will allow automatic conversion to 
        // User defined type when reading from Riak
        obj.setContentType("application/json")
        obj
    }.saveToRiak(bucketName)

    // Read from Riak with UDT to enable schema inference using reflection
    val df = sc.riakBucket[UserData](bucketName).queryAll.toDF

    println("Dataframe from Riak query:")
    df.show()

    df.registerTempTable("users")

    println("count by category")
    df.groupBy("category").count.show

    println("sort by num of letters")
    // Register user defined function
    sqlContext.udf.register("stringLength", (s: String) => s.length)
    sqlContext.sql("select user_id, name, stringLength(name) nameLength from users order by nameLength").show

    println("filter age >= 21")
    sqlContext.sql("select * from users where age >= 21").show

  }

  private def clearBucket(sparkConf: SparkConf): Unit = {
    val rf = RiakFunctions(sparkConf)
    rf.withRiakDo(session => {
      rf.resetAndEmptyBucketByName(bucketName)
    })
  }

  private def setSparkOpt(sparkConf: SparkConf, option: String, defaultOptVal: String): SparkConf = {
    val optval = sparkConf.getOption(option).getOrElse(defaultOptVal)
    sparkConf.set(option, optval)
  }

} 
Example 81
Source File: BackendConnector.scala    From nisp-frontend   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.nisp.connectors

import play.api.data.validation.ValidationError
import play.api.libs.json.{Format, JsObject, JsPath}
import uk.gov.hmrc.http.cache.client.SessionCache
import uk.gov.hmrc.nisp.models.enums.APIType._
import uk.gov.hmrc.nisp.services.MetricsService
import uk.gov.hmrc.nisp.utils.JsonDepersonaliser
import scala.concurrent.ExecutionContext.Implicits.global

import scala.concurrent.Future
import scala.util.{Failure, Success}
import uk.gov.hmrc.http.{ HeaderCarrier, HttpGet, HttpResponse }

trait BackendConnector {

  def http: HttpGet
  def serviceUrl: String
  def sessionCache: SessionCache
  val metricsService: MetricsService

  protected def retrieveFromCache[A](api: APIType, url: String)(implicit hc: HeaderCarrier, formats: Format[A]): Future[A] = {
    val keystoreTimerContext = metricsService.keystoreReadTimer.time()

    val sessionCacheF = sessionCache.fetchAndGetEntry[A](api.toString)
    sessionCacheF.onFailure {
      case _ => metricsService.keystoreReadFailed.inc()
    }
    sessionCacheF.flatMap { keystoreResult =>
      keystoreTimerContext.stop()
      keystoreResult match {
        case Some(data) =>
          metricsService.keystoreHitCounter.inc()
          Future.successful(data)
        case None =>
          metricsService.keystoreMissCounter.inc()
          connectToMicroservice[A](url, api) map {
            data: A => cacheResult(data, api.toString)
          }
      }
    }
  }

  private def connectToMicroservice[A](urlToRead: String, apiType: APIType)(implicit hc: HeaderCarrier, formats: Format[A]): Future[A] = {
    val timerContext = metricsService.startTimer(apiType)

    val httpResponseF = http.GET[HttpResponse](urlToRead)
    httpResponseF onSuccess {
      case _ => timerContext.stop()
    }
    httpResponseF onFailure {
      case _ => metricsService.incrementFailedCounter(apiType)
    }
    httpResponseF.map {
      httpResponse => httpResponse.json.validate[A].fold(
        errs => {
          val json = JsonDepersonaliser.depersonalise(httpResponse.json) match {
            case Success(s) => s"Depersonalised JSON\n$s"
            case Failure(e) => s"JSON could not be depersonalised\n${e.toString()}"
          }
          throw new JsonValidationException(s"Unable to deserialise $apiType: ${formatJsonErrors(errs)}\n$json")
        },
        valid => valid
      )
    }
  }

  private def cacheResult[A](a:A,name: String)(implicit hc: HeaderCarrier, formats: Format[A]): A = {
    val timerContext = metricsService.keystoreWriteTimer.time()
    val cacheF = sessionCache.cache[A](name, a)
    cacheF.onSuccess {
      case _ => timerContext.stop()
    }
    cacheF.onFailure {
      case _ => metricsService.keystoreWriteFailed.inc()
    }
    a
  }

  private def formatJsonErrors(errors: Seq[(JsPath, Seq[ValidationError])]): String = {
    errors.map(p => p._1 + " - " + p._2.map(e => removeJson(e.message)).mkString(",")).mkString(" | ")
  }

  private def removeJson(message: String): String = {
    message.indexOf("{") match {
      case i if i != -1  => message.substring(0, i - 1) + " [JSON removed]"
      case _ => message
    }
  }

  private[connectors] class JsonValidationException(message: String) extends Exception(message)
} 
Example 82
Source File: BackendConnectorSpec.scala    From nisp-frontend   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.nisp.connectors

import org.mockito.Mockito.when
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.mock.MockitoSugar
import play.api.libs.json.Json
import uk.gov.hmrc.http.cache.client.SessionCache
import uk.gov.hmrc.nisp.helpers.{MockMetricsService, MockSessionCache}
import uk.gov.hmrc.nisp.models.NationalInsuranceRecord
import uk.gov.hmrc.nisp.models.enums.APIType
import uk.gov.hmrc.nisp.services.MetricsService
import uk.gov.hmrc.nisp.utils.JsonDepersonaliser
import uk.gov.hmrc.play.test.UnitSpec

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
import uk.gov.hmrc.http.{HeaderCarrier, HttpGet, HttpResponse}

class BackendConnectorSpec extends UnitSpec with MockitoSugar with ScalaFutures {

  val mockHttp: HttpGet = mock[HttpGet]
  
  object BackendConnectorImpl extends BackendConnector {
    override def http: HttpGet = mockHttp
    override def sessionCache: SessionCache = MockSessionCache
    override def serviceUrl: String = "national-insurance"
    override val metricsService: MetricsService = MockMetricsService

    def getNationalInsurance()(implicit headerCarrier: HeaderCarrier): Future[NationalInsuranceRecord] = {
      val urlToRead = s"$serviceUrl/ni"
      retrieveFromCache[NationalInsuranceRecord](APIType.NationalInsurance, urlToRead)(headerCarrier, NationalInsuranceRecord.formats)
    }
  }

  implicit val headerCarrier = HeaderCarrier(extraHeaders = Seq("Accept" -> "application/vnd.hmrc.1.0+json"))

  "connectToMicroservice" should {
    "should return depersonalised JSON" in {
      val json = Json.obj(
        "qualifyingYearsPriorTo1975" -> 0,
        "numberOfGaps" -> 6,
        "numberOfGapsPayable" -> 4,
        "dateOfEntry" -> "1975-08-01",
        "homeResponsibilitiesProtection" -> false,
        "earningsIncludedUpTo" -> "2016-04-05",
        "_embedded" -> Json.obj(
          "taxYears" -> Json.arr()
        )
      )

      val depersonalisedJson =  JsonDepersonaliser.depersonalise(json) match {
        case Success(s) => s
        case Failure(_) => fail()
      }

      val Ok = 200
      val response = Future(HttpResponse(Ok, Option.apply(json)))
      when(mockHttp.GET[HttpResponse]("national-insurance/ni")).thenReturn(response)

      val future: Future[NationalInsuranceRecord] = BackendConnectorImpl.getNationalInsurance()

      whenReady(future.failed) {
        t: Throwable =>
          t.getMessage.contains(depersonalisedJson) shouldBe true
          t.getMessage.contains("2016-04-05") shouldBe false
      }
    }
  }

} 
Example 83
Source File: Main.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.sky.kafka.configurator

import cats.implicits._
import com.sky.BuildInfo
import com.sky.kafka.configurator.error.ConfiguratorFailure
import com.typesafe.scalalogging.LazyLogging
import org.zalando.grafter._

import scala.util.{Failure, Success, Try}

object Main extends LazyLogging {

  def main(args: Array[String]): Unit = {
    logger.info(s"Running ${BuildInfo.name} ${BuildInfo.version} with args: ${args.mkString(", ")}")

    run(args, sys.env) match {
      case Success((errors, infoLogs)) =>
        errors.foreach(e => logger.warn(s"${e.getMessage}. Cause: ${e.getCause.getMessage}"))
        infoLogs.foreach(msg => logger.info(msg))
        if (errors.isEmpty) System.exit(0) else System.exit(1)
      case Failure(t) =>
        logger.error(t.getMessage)
        System.exit(1)
    }
  }

  def run(args: Array[String], envVars: Map[String, String]): Try[(List[ConfiguratorFailure], List[String])] =
    ConfigParsing.parse(args, envVars).flatMap { conf =>
      val app = KafkaConfiguratorApp.reader(conf)
      val result = app.configureTopicsFrom(conf.files.toList)
      stop(app)
      result
    }

  private def stop(app: KafkaConfiguratorApp): Unit =
    Rewriter.stop(app).value.foreach {
      case StopOk(msg) => logger.debug(s"Component stopped: $msg")
      case StopError(msg, ex) => logger.warn(s"Error whilst stopping component: $msg", ex)
      case StopFailure(msg) => logger.warn(s"Failure whilst stopping component: $msg")
    }
} 
Example 84
Source File: TopicConfigurator.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.sky.kafka.configurator

import cats.Eq
import cats.data.Reader
import cats.instances.int._
import cats.instances.vector._
import cats.instances.try_._
import cats.syntax.eq._
import com.sky.kafka.configurator.error.{ReplicationChangeFound, TopicNotFound}
import com.typesafe.scalalogging.LazyLogging

import scala.util.control.NonFatal
import scala.util.{Failure, Success}

case class TopicConfigurator(topicReader: TopicReader, topicWriter: TopicWriter) extends LazyLogging {

  def configure(topic: Topic): Logger[Unit] =
    topicReader.fetch(topic.name) match {
      case Success(currentTopic) =>
        updateTopic(currentTopic, topic)
      case Failure(TopicNotFound(_)) =>
        topicWriter.create(topic)
          .withLog(s"Topic ${topic.name} was not found, so it has been created")
      case Failure(NonFatal(t)) =>
        Failure(t).asWriter
    }

  private def updateTopic(oldTopic: Topic, newTopic: Topic): Logger[Unit] = {

    def ifDifferent[T: Eq](oldValue: T, newValue: T)(updateOperation: (Topic, Topic) => Logger[Unit])(messageIfSame: String): Logger[Unit] =
      if (oldValue =!= newValue)
        updateOperation(oldTopic, newTopic)
      else
        Success(()).withLog(messageIfSame)

    import TopicConfigurator._

    for {
      _ <- ifDifferent(oldTopic.replicationFactor, newTopic.replicationFactor)(failReplicationChange)(s"Replication factor unchanged for ${newTopic.name}.")
      _ <- ifDifferent(oldTopic.partitions, newTopic.partitions)(updatePartitions)(s"No change in number of partitions for ${newTopic.name}")
      _ <- ifDifferent(oldTopic.config, newTopic.config)(updateConfig)(s"No change in config for ${newTopic.name}")
    } yield ()
  }

  private def failReplicationChange(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    Failure(ReplicationChangeFound).asWriter

  private def updatePartitions(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    topicWriter
      .updatePartitions(newTopic.name, newTopic.partitions)
      .withLog(s"Updated topic ${newTopic.name} from ${oldTopic.partitions} to ${newTopic.partitions} partition(s)")

  private def updateConfig(oldTopic: Topic, newTopic: Topic): Logger[Unit] =
    topicWriter
      .updateConfig(newTopic.name, newTopic.config)
      .withLog(s"Updated configuration of topic ${newTopic.name}")
}

object TopicConfigurator {
  def reader: Reader[AppConfig, TopicConfigurator] = KafkaTopicAdmin.reader
    .map(kafkaAdminClient => TopicConfigurator(kafkaAdminClient, kafkaAdminClient))

  private implicit val topicConfigIsContained: Eq[Map[String, String]] = Eq.instance { case (left, right) =>
    left.toList.forall(right.toList.contains(_)) || right.toList.forall(left.toList.contains(_))
  }
} 
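
The Logger[Unit] returned above is kafka-configurator's writer over Try, pairing each Try with accumulated log lines. A minimal sketch of the assumed shape of withLog and asWriter (LoggerSketch and TryLogOps are illustrative names, not the project's):

import cats.data.WriterT

import scala.util.Try

object LoggerSketch {
  // assumed alias: a Try result carrying its accumulated log lines
  type Logger[A] = WriterT[Try, Vector[String], A]

  implicit class TryLogOps[A](private val t: Try[A]) extends AnyVal {
    def withLog(msg: String): Logger[A] = WriterT(t.map(a => (Vector(msg), a)))
    def asWriter: Logger[A]             = WriterT(t.map(a => (Vector.empty[String], a)))
  }
}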
Example 85
Source File: KafkaConfiguratorApp.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import java.io.{File, FileReader}

import cats.data.Reader
import cats.implicits._
import com.sky.kafka.configurator.error.ConfiguratorFailure

import scala.util.{Failure, Success, Try}

case class KafkaConfiguratorApp(configurator: TopicConfigurator) {

  def configureTopicsFrom(files: List[File]): Try[(List[ConfiguratorFailure], List[String])] =
    files.traverse { file =>
      for {
        fileReader <- Try(new FileReader(file))
        topics <- TopicConfigurationParser(fileReader).toTry
      } yield configureAll(topics)
    }.map(_.separate.bimap(_.flatten, _.flatten))

  private def configureAll(topics: List[Topic]): (List[ConfiguratorFailure], List[String]) = {
    val (errors, allLogs) = topics.map { topic =>
      configurator.configure(topic).run match {
        case Success((logs, _)) => Right(logs)
        case Failure(t) => Left(ConfiguratorFailure(topic.name, t))
      }
    }.separate
    (errors, allLogs.flatten)
  }
}

object KafkaConfiguratorApp {
  def reader: Reader[AppConfig, KafkaConfiguratorApp] =
    TopicConfigurator.reader.map(KafkaConfiguratorApp.apply)
} 
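
The traverse/separate combination above splits the per-file results into failures and successes. The same cats syntax in isolation:

import cats.implicits._

val results: List[Either[String, Int]] = List(Right(1), Left("boom"), Right(2))
val (errors, values) = results.separate
// errors == List("boom"), values == List(1, 2)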
Example 86
Source File: KafkaTopicAdmin.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import java.util.concurrent.ExecutionException

import cats.data.Reader
import com.sky.kafka.configurator.error.TopicNotFound
import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG
import org.apache.kafka.clients.admin._
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException
import org.zalando.grafter.{ Stop, StopResult }

import scala.collection.JavaConverters._
import scala.language.postfixOps
import scala.util.{ Failure, Success, Try }

object KafkaTopicAdmin {
  def apply(adminClient: AdminClient): KafkaTopicAdmin = new KafkaTopicAdmin(adminClient)

  def reader: Reader[AppConfig, KafkaTopicAdmin] = Reader { config =>
    import com.sky.kafka.utils.MapToJavaPropertiesConversion.mapToProperties
    KafkaTopicAdmin(AdminClient.create(Map(BOOTSTRAP_SERVERS_CONFIG -> config.bootstrapServers) ++ config.props))
  }
}

class KafkaTopicAdmin(ac: AdminClient) extends TopicReader with TopicWriter with Stop {

  override def fetch(topicName: String) = {

    def topicDescription = Try {
      val allDescriptions = ac.describeTopics(Seq(topicName).asJava).all.get
      allDescriptions.get(topicName)
    } match {
      case Success(result) => Success(result)
      case Failure(e: ExecutionException) if e.getCause.isInstanceOf[UnknownTopicOrPartitionException] => Failure(TopicNotFound(topicName))
      case other => other
    }

    def topicConfig = Try {
      val allConfigs = ac.describeConfigs(Seq(configResourceForTopic(topicName)).asJava).all.get
      allConfigs.get(configResourceForTopic(topicName))
    }

    for {
      desc <- topicDescription
      partitions = desc.partitions().size()
      replicationFactor = desc.partitions().asScala.head.replicas().size()
      config <- topicConfig
    } yield Topic(desc.name(), partitions, replicationFactor, config)

  }

  override def create(topic: Topic) = Try {
    val newTopic = new NewTopic(topic.name, topic.partitions, topic.replicationFactor.toShort).configs(topic.config.asJava)
    ac.createTopics(Seq(newTopic).asJava).all().get
  }

  override def updateConfig(topicName: String, config: Map[String, Object]) = Try {
    val c = config.map {
      case (key, value) => new ConfigEntry(key, value.toString)
    }.toList.asJava
    ac.alterConfigs(Map(configResourceForTopic(topicName) -> new Config(c)).asJava).all().get
  }

  override def updatePartitions(topicName: String, numPartitions: Int) = Try {
    ac.createPartitions(Map(topicName -> NewPartitions.increaseTo(numPartitions)).asJava).all().get()
  }

  override def stop = StopResult.eval("KafkaAdminClient")(ac.close())

  private def configResourceForTopic(topicName: String) = new ConfigResource(ConfigResource.Type.TOPIC, topicName)

  private implicit def kafkaConfigToMap(config: Config): Map[String, String] = config.entries().asScala.map { entry =>
    entry.name() -> entry.value()
  } toMap
} 
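
fetch above translates Kafka's ExecutionException wrapper into the domain error TopicNotFound. The unwrap-the-cause pattern, generalized (mapCause is an illustrative helper, not part of the project):

import java.util.concurrent.ExecutionException

import scala.util.{Failure, Try}

def mapCause[A](t: Try[A])(pf: PartialFunction[Throwable, Throwable]): Try[A] =
  t.recoverWith {
    case e: ExecutionException if pf.isDefinedAt(e.getCause) => Failure(pf(e.getCause))
  }

// e.g. mapCause(attempt) { case _: UnknownTopicOrPartitionException => TopicNotFound(topicName) }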
Example 87
Source File: KafkaConfiguratorAppSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import java.io.{File, FileReader}

import com.sky.kafka.configurator.error.{ConfiguratorFailure, TopicNotFound}
import common.BaseSpec
import io.circe.generic.AutoDerivation
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar

import scala.util.{Failure, Success}

class KafkaConfiguratorAppSpec extends BaseSpec with MockitoSugar with AutoDerivation {

  val topicConfigurator = mock[TopicConfigurator]
  val kafkaConfiguratorApp = KafkaConfiguratorApp(topicConfigurator)

  it should "provide logs and errors when file has been parsed successfully" in {
    val file = new File(getClass.getResource("/topic-configuration-with-error.yml").getPath)
    val topics = TopicConfigurationParser(new FileReader(file)).right.value

    val error = TopicNotFound(topics(1).name)

    when(topicConfigurator.configure(topics.head))
      .thenReturn(Success(()).withLog("foo"))
    when(topicConfigurator.configure(topics(1)))
      .thenReturn(Failure[Unit](error).asWriter)
    when(topicConfigurator.configure(topics(2)))
      .thenReturn(Success(()).withLog("bar"))

    kafkaConfiguratorApp.configureTopicsFrom(List(file)) shouldBe Success((
      List(ConfiguratorFailure(topics.tail.head.name, error)),
      List("foo", "bar")
    ))
  }

  it should "succeed when given empty configuration file" in {
    val invalidFile = File.createTempFile("empty", "yml")
    invalidFile.deleteOnExit()
    kafkaConfiguratorApp.configureTopicsFrom(List(invalidFile)) shouldBe a[Success[_]]
  }

  it should "fail-fast when the file does not exist" in {
    kafkaConfiguratorApp.configureTopicsFrom(List(new File("does-not-exist"))) shouldBe a[Failure[_]]
  }

} 
Example 88
Source File: JsonMatchers.scala    From scalatest-json   with Apache License 2.0
package com.stephenn.scalatest.jsonassert

import org.scalatest.matchers.MatchResult
import org.skyscreamer.jsonassert.{JSONCompare, JSONCompareMode}

import scala.util.{Failure, Success, Try}
import org.scalatest.matchers.Matcher

trait JsonMatchers {

  
  def matchJson(right: String): Matcher[String] =
    Matcher[String] { left =>
      Try(
        JSONCompare
          .compareJSON(right, left, JSONCompareMode.STRICT)
      ) match {
        case Failure(_) =>
          MatchResult(
            matches = false,
            rawFailureMessage = "Could not parse json {0} did not equal {1}",
            rawNegatedFailureMessage = "Json should not have matched {0} {1}",
            args = IndexedSeq(left.trim, right.trim)
          )
        case Success(jSONCompareResult) =>
          MatchResult(
            matches = jSONCompareResult.passed(),
            rawFailureMessage =
              "Json did not match {0} did not match {1}\n\nJson Diff:\n{2}",
            rawNegatedFailureMessage =
              "Json should not have matched {0} matched {1}\n\nJson Diff:\n{2}",
            args =
              IndexedSeq(left.trim, right.trim, jSONCompareResult.getMessage)
          )
      }
    }
}

object JsonMatchers extends JsonMatchers 
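
A hypothetical ScalaTest usage of the matcher above (the suite name and ScalaTest 3.x import paths are assumptions):

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class JsonSpec extends AnyFlatSpec with Matchers with JsonMatchers {
  "a payload" should "match regardless of key order" in {
    """{"a":1,"b":2}""" should matchJson("""{"b":2,"a":1}""")
  }
}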
Example 89
Source File: akkaHttp.scala    From sup   with Apache License 2.0
package sup.modules

import akka.http.scaladsl.marshalling.ToEntityMarshaller
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives.{path => akkaPath, _}
import akka.http.scaladsl.server.Route
import cats.effect.Effect
import cats.syntax.functor._
import cats.syntax.reducible._
import cats.~>
import cats.Functor
import cats.Reducible
import sup.HealthCheck
import sup.HealthResult

import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import akka.http.scaladsl.model.HttpRequest

object akkahttp {

  
  def healthCheckRoutes[F[_]: Effect, H[_]: Reducible](
    healthCheck: HealthCheck[F, H],
    path: String = "health-check"
  )(
    implicit marshaller: ToEntityMarshaller[HealthResult[H]]
  ): Route =
    akkaPath(path) {
      get {
        onComplete(Effect[F].toIO(healthCheckResponse(healthCheck)).unsafeToFuture()) {
          case Success(response) => complete(response)
          case Failure(error)    => failWith(error)
        }
      }
    }

  def healthCheckResponse[F[_]: Functor, H[_]: Reducible](
    healthCheck: HealthCheck[F, H]
  ): F[(StatusCode, HealthResult[H])] =
    healthCheck.check.map { check =>
      if (check.value.reduce.isHealthy) StatusCodes.OK -> check
      else StatusCodes.ServiceUnavailable -> check
    }

  def healthCheckRoutesWithContext[F[_]: Functor, H[_]: Reducible, R](
    healthCheck: HealthCheck[F, H],
    path: String = "health-check"
  )(
    run: HttpRequest => F ~> Future
  )(
    implicit marshaller: ToEntityMarshaller[HealthResult[H]]
  ): Route =
    akkaPath(path) {
      get {
        extractRequest { request =>
          onComplete(run(request)(healthCheckResponse(healthCheck))) {
            case Success(response) => complete(response)
            case Failure(error)    => failWith(error)
          }
        }
      }
    }
} 
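
The Success/Failure handling inside onComplete above is the standard akka-http idiom for completing a route from a Future; reduced to a minimal self-contained route:

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

import scala.concurrent.Future
import scala.util.{Failure, Success}

val route: Route =
  path("ping") {
    get {
      onComplete(Future.successful("pong")) {
        case Success(value) => complete(value)  // 200 with the value
        case Failure(error) => failWith(error)  // bubbles into the exception handler
      }
    }
  }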
Example 90
Source File: DatasetManagerService.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package it.gov.daf.server

import it.gov.daf.catalogmanager.MetaCatalog
import it.gov.daf.catalogmanager.client.Catalog_managerClient
import it.gov.daf.datasetmanager._
import it.gov.daf.server.storage.StorageManagerClient
import play.api.libs.ws.WSClient

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Try}
import json._

class DatasetManagerService(
  catalogUrl: String,
  storageUrl: String,
  ws: WSClient
)(implicit val ec: ExecutionContext) {

  private val catalogService = new Catalog_managerClient(ws)(catalogUrl)
  private val storageManager = new StorageManagerClient(storageUrl, ws)

  def getDatasetSchema(authorization: String, datasetId: String): Future[Dataset] = {
    val result = catalogService.datasetcatalogbyid(authorization, datasetId)
      .flatMap(c => Future.fromTry(extractParams(c)))
      .flatMap(storageManager.datasetSchema(authorization, _))

    result.map(_.as[Dataset])
  }

  def getDataset(authorization: String, datasetId: String): Future[Dataset] = {
    val result = catalogService.datasetcatalogbyid(authorization, datasetId)
      .flatMap(c => Future.fromTry(extractParams(c)))
      .flatMap(storageManager.dataset(authorization, _))

    result.map(_.as[Dataset])
  }

  def getDataset(authorization: String, datasetId: String, numRows: Int): Future[Dataset] = {
    val result = catalogService.datasetcatalogbyid(authorization, datasetId)
      .flatMap(c => Future.fromTry(extractParams(c, numRows))) // forward the requested row limit
      .flatMap(storageManager.dataset(authorization, _))

    result.map(_.as[Dataset])
  }

  def searchDataset(authorization: String, datasetId: String, query: Query): Future[Dataset] = {
    val result = catalogService.datasetcatalogbyid(authorization, datasetId)
      .flatMap(c => Future.fromTry(extractParams(c)))
      .map(params => params ++ transform(query))
      .flatMap(storageManager.search(authorization, _))

    result.map(_.as[Dataset])
  }

  private def extractParams(catalog: MetaCatalog): Try[Map[String, String]] = {
    catalog.operational.storage_info match {
      case Some(storage) =>
        if (storage.hdfs.isDefined) {
          Try(
            Map(
              "protocol" -> "hdfs",
              "path" -> storage.hdfs.flatMap(_.path).get
            )
          )
        } else if (storage.kudu.isDefined) {
          Try(
            Map(
              "protocol" -> "kudu",
              "table" -> storage.kudu.flatMap(_.table_name).get
            )
          )
        } else if (storage.hbase.isDefined) {
          Try(
            Map(
              "protocol" -> "opentsdb",
              "metric" -> storage.hbase.flatMap(_.metric).get,
              //FIXME right now it encodes a list a as comma separated
              "tags" -> storage.hbase.flatMap(_.tags).get.mkString(","),
              //FIXME how to encode the interval?
              "interval" -> ""
            )
          )
        } else Failure(new IllegalArgumentException("no storage configured into catalog.operational field"))

      case None =>
        Failure(new IllegalArgumentException("no storage_info configured"))
    }
  }

  private def extractParams(catalog: MetaCatalog, numRows: Int = 100): Try[Map[String, String]] = {
    extractParams(catalog)
      .map(_ + ("limit" -> numRows.toString))
  }

  //FIXME this will be changed in the next sprint
  private def transform(query: Query): Map[String, String] = Map.empty
} 
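
Future.fromTry, used above to splice the synchronous extractParams into the async pipeline, lifts a Try into an already-completed Future:

import scala.concurrent.Future
import scala.util.Try

val params: Try[Map[String, String]] = Try(Map("protocol" -> "hdfs"))
val lifted: Future[Map[String, String]] = Future.fromTry(params)
// a Failure becomes an already-failed Future, short-circuiting later flatMaps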
Example 91
Source File: AbstractController.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import it.gov.daf.common.authentication.Authentication
import it.gov.daf.common.config.Read
import javax.security.auth.login.LoginContext
import org.apache.hadoop.security.UserGroupInformation
import org.pac4j.play.store.PlaySessionStore
import play.api.Configuration
import play.api.mvc._

import scala.util.{ Failure, Success, Try }


abstract class AbstractController(protected val configuration: Configuration, val playSessionStore: PlaySessionStore) extends Controller {

  private def prepareEnv() = Try { System.setProperty("javax.security.auth.useSubjectCredsOnly", "false") }

  private def loginUserFromConf = for {
    user <- Read.string { "kerberos.principal" }.!
    path <- Read.string { "kerberos.keytab"    }.!
  } yield UserGroupInformation.loginUserFromKeytab(user, path)

  private def prepareAuth() = Try { Authentication(configuration, playSessionStore) }

  private def initUser() = for {
    _ <- prepareEnv()
    _ <- loginUserFromConf.read { configuration }
    _ <- prepareAuth()
  } yield UserGroupInformation.getLoginUser

  protected implicit val proxyUser = initUser() match {
    case Success(null)  => throw new RuntimeException("Unable to initialize user for application")
    case Success(user)  => user
    case Failure(error) => throw new RuntimeException("Unable to initialize user for application", error)
  }

} 
Example 92
Source File: KuduController.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import org.apache.kudu.spark.kudu._
import org.apache.spark.sql.{ DataFrame, SparkSession }
import org.slf4j.{ Logger, LoggerFactory }

import scala.util.{ Failure, Try }

class KuduController(sparkSession: SparkSession, master: String) {

  val alogger: Logger = LoggerFactory.getLogger(this.getClass)

  def readData(table: String): Try[DataFrame] =  Try{
    sparkSession
      .sqlContext
      .read
      .options(Map("kudu.master" -> master, "kudu.table" -> table)).kudu
  }.recoverWith {
    case ex =>
      alogger.error(s"Exception ${ex.getMessage}\n ${ex.getStackTrace.mkString("\n")} ")
      Failure(ex)
  }
} 
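
The recoverWith above only logs before re-wrapping the failure, leaving the Try a Failure. The pattern in isolation (logged is an illustrative helper):

import scala.util.{Failure, Try}

def logged[A](label: String)(body: => A): Try[A] =
  Try(body).recoverWith {
    case ex =>
      println(s"$label failed: ${ex.getMessage}") // stand-in for a real logger
      Failure(ex) // the result stays a Failure; logging is the only side effect
  }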
Example 93
Source File: QueryExecution.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import akka.stream.scaladsl.Source
import cats.syntax.show.toShow
import daf.dataset._
import daf.dataset.query.jdbc.{ JdbcResult, QueryFragmentWriterSyntax, Writers }
import daf.dataset.query.Query
import daf.web._
import daf.filesystem._
import daf.instances.FileSystemInstance
import it.gov.daf.common.utils._
import org.apache.hadoop.fs.Path
import play.api.libs.json.JsValue

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

trait QueryExecution { this: DatasetController with DatasetExport with FileSystemInstance =>

  private def extractDatabaseName(parent: String, params: FileDatasetParams) = parent.toLowerCase match {
    case "opendata" => params.extraParams.get("theme").map { s => s"opendata__${s.toLowerCase}" } getOrElse "opendata" // append __{theme} for opendata
    case other      => other // use the parent dir for other data
  }

  private def extractTableName(path: Path, params: FileDatasetParams): Try[String] = Try {
    s"${extractDatabaseName(path.getParent.getName, params)}.${path.getName.toLowerCase}"
  }

  private def extractTableName(params: DatasetParams, userId: String): Try[String] = params match {
    case kudu: KuduDatasetParams => (proxyUser as userId) { downloadService.tableInfo(kudu.table) } map { _ => kudu.table }
    case file: FileDatasetParams => (proxyUser as userId) { extractTableName(file.path.asHadoop.resolve, file) }
  }

  private def prepareQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    fragment  <- Writers.sql(query, tableName).write
  } yield fragment.query[Unit].sql

  private def analyzeQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    analysis  <- queryService.explain(query, tableName, userId)
  } yield analysis

  private def transform(jdbcResult: JdbcResult, targetFormat: FileDataFormat) = targetFormat match {
    case CsvFileFormat  => Try {
      Source[String](jdbcResult.toCsv).map { csv => s"$csv${System.lineSeparator}" }
    }
    case JsonFileFormat => Try {
      wrapJson {
        Source[JsValue](jdbcResult.toJson).map { _.toString }
      }
    }
    case _              => Failure { new IllegalArgumentException(s"Invalid target format [$targetFormat]; must be [csv | json]") }
  }

  // Web
  // Failure

  private def failQuickExec(params: DatasetParams, targetFormat: FileDataFormat) = Future.successful {
    TemporaryRedirect {
      s"${controllers.routes.DatasetController.queryDataset(params.catalogUri, targetFormat.show, "batch").url}"
    }
  }

  // Executions

  private def doBatchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = prepareQuery(params, query, userId) match {
    case Success(sql)   => prepareQueryExport(sql, targetFormat).map { formatExport(_, targetFormat) }
    case Failure(error) => Future.failed { error }
  }

  private def doQuickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = for {
    tableName  <- extractTableName(params, userId)
    jdbcResult <- queryService.exec(query, tableName, userId)
    data       <- transform(jdbcResult, targetFormat)
  } yield data

  // API

  protected def quickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = analyzeQuery(params, query, userId) match {
    case Success(analysis) if analysis.memoryEstimation <= impalaConfig.memoryEstimationLimit => doQuickExec(params, query, targetFormat, userId).~>[Future].map { respond(_, params.name, targetFormat) }
    case Success(_)                                                                           => failQuickExec(params, targetFormat)
    case Failure(error)                                                                       => Future.failed { error }
  }

  protected def batchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) =
    doBatchExec(params, query, targetFormat, userId).map { respond(_, params.name, targetFormat) }

  protected def exec(params: DatasetParams, query: Query, userId: String, targetFormat: FileDataFormat, method: DownloadMethod) = method match {
    case QuickDownloadMethod => quickExec(params, query, targetFormat, userId)
    case BatchDownloadMethod => batchExec(params, query, targetFormat, userId)
  }

} 
Example 94
Source File: DownloadExecution.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import cats.syntax.show.toShow
import daf.dataset._
import daf.filesystem.{ FileDataFormat, fileFormatShow }
import daf.web._
import daf.instances.FileSystemInstance
import it.gov.daf.common.utils._

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait DownloadExecution { this: DatasetController with DatasetExport with FileSystemInstance =>

  // Failures

  private def failQuickDownload(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = Future.successful {
    TemporaryRedirect {
      s"${controllers.routes.DatasetController.getDataset(params.catalogUri, targetFormat.show, "batch", limit).url}"
    }
  }

  // Retrievals

  private def retrieveFileInfo(path: String, userId: String) = (proxyUser as userId) { downloadService.fileInfo(path) }

  private def retrieveTableInfo(tableName: String, userId: String) = (proxyUser as userId) { downloadService.tableInfo(tableName) }

  // Executions

  private def doTableExport(params: KuduDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) = retrieveTableInfo(params.table, userId) match {
    case Success(_)     => prepareTableExport(params.table, targetFormat, params.extraParams, limit).map { formatExport(_, targetFormat) }
    case Failure(error) => Future.failed { error }
  }

  private def doFileExport(params: FileDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) = retrieveFileInfo(params.path, userId) match {
    case Success(pathInfo) => prepareFileExport(pathInfo, params.format, targetFormat, params.extraParams, limit).map { formatExport(_, targetFormat) }
    case Failure(error)    => Future.failed { error }
  }

  private def doQuickFile(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = prepareDirect(params, targetFormat, limit).map { respond(_, params.name, targetFormat) }.~>[Future]

  private def quickFileDownload(params: FileDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) = retrieveFileInfo(params.path, userId) match {
    case Success(pathInfo) if pathInfo.estimatedSize <= exportConfig.sizeThreshold => doQuickFile(params, targetFormat, limit)
    case Success(pathInfo)                                                         => failQuickDownload(params, targetFormat, limit)
    case Failure(error)                                                            => Future.failed { error }
  }

  // API

  protected def quickDownload(params: DatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int] = None) = params match {
    case fileParams: FileDatasetParams => quickFileDownload(fileParams, userId, targetFormat, limit)
    case kuduParams: KuduDatasetParams => failQuickDownload(kuduParams, targetFormat, limit) // no quick download option for kudu
  }

  protected def batchDownload(params: DatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int] = None) = params match {
    case kuduParams: KuduDatasetParams => doTableExport(kuduParams, userId, targetFormat, limit).map { respond(_, kuduParams.table, targetFormat) }
    case fileParams: FileDatasetParams => doFileExport(fileParams, userId, targetFormat, limit).map { respond(_, fileParams.name, targetFormat) }
  }

  protected def download(params: DatasetParams, userId: String, targetFormat: FileDataFormat, method: DownloadMethod, limit: Option[Int] = None) = method match {
    case QuickDownloadMethod => quickDownload(params, userId, targetFormat, limit)
    case BatchDownloadMethod => batchDownload(params, userId, targetFormat, limit)
  }

} 
Example 95
Source File: DatasetExport.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import akka.stream.scaladsl.{ Source, StreamConverters }
import cats.syntax.show.toShow
import daf.dataset.{ DatasetParams, ExtraParams }
import daf.filesystem.{ CsvFileFormat, FileDataFormat, JsonFileFormat, PathInfo, fileFormatShow }
import daf.web.contentType

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait DatasetExport { this: DatasetController =>

  protected def prepareDirect(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = targetFormat match {
    case JsonFileFormat => datasetService.jsonData(params, limit)
    case CsvFileFormat  => datasetService.csvData(params, limit)
    case _              => Failure { new IllegalArgumentException("Unable to prepare download; only CSV and JSON are permitted") }
  }

  protected def prepareFileExport(pathInfo: PathInfo, sourceFormat: FileDataFormat, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportFile(pathInfo.path, sourceFormat, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error)  => Future.failed { error }
    }

  protected def prepareTableExport(table: String, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportTable(table, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error)  => Future.failed { error }
    }

  protected def prepareQueryExport(query: String, targetFormat: FileDataFormat) =
    fileExportService.exportQuery(query, targetFormat).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error)  => Future.failed { error }
    }

  protected def respond(data: Source[String, _], fileName: String, targetFormat: FileDataFormat) = Ok.chunked(data).withHeaders(
    CONTENT_DISPOSITION -> s"""attachment; filename="$fileName.${targetFormat.show}"""",
    CONTENT_TYPE        -> contentType(targetFormat)
  )

} 
Example 96
Source File: HDFSController.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import com.databricks.spark.avro._
import org.apache.spark.sql.{ DataFrame, SparkSession }
import org.slf4j.{Logger, LoggerFactory}

import scala.util.{Failure, Try}

class HDFSController(sparkSession: SparkSession) {

  val alogger: Logger = LoggerFactory.getLogger(this.getClass)

  def readData(path: String, format: String, separator: Option[String]): Try[DataFrame] =  format match {
    case "csv" => Try {
      val pathFixAle = path + "/" + path.split("/").last + ".csv"
      alogger.debug(s"questo e' il path $pathFixAle")
      separator match {
        case None => sparkSession.read.csv(pathFixAle)
        case Some(sep) => sparkSession.read.format("csv")
          .option("sep", sep)
          .option("inferSchema", "true")
          .option("header", "true")
          .load(pathFixAle)
      }
    }
    case "parquet" => Try { sparkSession.read.parquet(path) }
    case "avro"    => Try { sparkSession.read.avro(path) }
    case unknown   => Failure { new IllegalArgumentException(s"Unsupported format [$unknown]") }
  }
} 
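
A hypothetical invocation of the controller above, assuming a local SparkSession and an existing CSV directory (the path is illustrative):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("hdfs-controller-demo").getOrCreate()
val controller = new HDFSController(spark)

// readData returns Try[DataFrame]; foreach runs only on Success
controller.readData("/data/opendata/sample", "csv", Some(";")).foreach(_.show())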
Example 97
Source File: FileExportJob.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package daf.dataset.export

import daf.dataset.ExtraParams
import daf.filesystem._
import org.apache.hadoop.fs.Path
import org.apache.livy.{ Job, JobContext }
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }


class FileExportJob(val from: FileExportInfo, val to: FileExportInfo, val extraParams: Map[String, String], limit: Option[Int]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true
  private val csvInferSchema   = true

  // Export

  private def prepareCsvReader(reader: DataFrameReader) = reader
    .option("inferSchema", csvInferSchema)
    .option("header",      csvIncludeHeader)
    .option("delimiter",   csvDelimiter)

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header",    csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def read(session: SparkSession) = from match {
    case FileExportInfo(path, RawFileFormat | CsvFileFormat) => prepareCsvReader(session.read).csv(path)
    case FileExportInfo(path, ParquetFileFormat)             => session.read.parquet(path)
    case FileExportInfo(path, JsonFileFormat)                => session.read.json(path)
    case FileExportInfo(_, unsupported)                      => throw new IllegalArgumentException(s"Input file format [$unsupported] is invalid")
  }

  private def addLimit(data: DataFrame) = limit match {
    case Some(value) => data.limit(value)
    case None        => data
  }

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data    <- Try { read(session) }
    limited <- Try { addLimit(data) }
    _       <- Try { write(limited) }
  } yield ()

  override def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }

}

object FileExportJob {

  def create(inputPath: String,
             outputPath: String,
             from: FileDataFormat,
             to: FileDataFormat,
             extraParams: ExtraParams = Map.empty[String, String],
             limit: Option[Int]) = new FileExportJob(
    FileExportInfo(inputPath, from),
    FileExportInfo(outputPath, to),
    extraParams,
    limit
  )

}

case class FileExportInfo(path: String, format: FileDataFormat)

object FileExportInfo {

  def apply(path: Path, format: FileDataFormat): FileExportInfo = apply(path.toUri.getPath, format)

} 
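
doExport above chains read, limit, and write inside Try; a for-comprehension over Try stops at the first Failure:

import scala.util.Try

val result = for {
  a <- Try(1)
  b <- Try[Int](sys.error("read failed"))
  c <- Try(a + b) // never evaluated once b fails
} yield c
// result == Failure(java.lang.RuntimeException: read failed)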
Example 98
Source File: QueryExportJob.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package daf.dataset.export

import daf.dataset.ExtraParams
import daf.filesystem._
import org.apache.livy.{ Job, JobContext }
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }


class QueryExportJob(val query: String, val to: FileExportInfo, val extraParams: Map[String, String]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true

  // Export

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header",    csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data <- Try { session.sql(query) }
    _    <- Try { write(data) }
  } yield ()

  override def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }

}

object QueryExportJob {

  def create(query: String, outputPath: String, to: FileDataFormat, extraParams: ExtraParams = Map.empty[String, String]) = new QueryExportJob(
    query,
    FileExportInfo(outputPath, to),
    extraParams
  )

} 
Example 99
Source File: KuduExportJob.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package daf.dataset.export

import daf.filesystem.{ CsvFileFormat, FileDataFormat, JsonFileFormat }
import org.apache.livy.{ Job, JobContext }
import org.apache.kudu.spark.kudu._
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }


class KuduExportJob(val table: String, val master: String, val to: FileExportInfo, extraParams: Map[String, String], limit: Option[Int]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header",    csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def prepareReader(reader: DataFrameReader) = reader
    .option("kudu.master", master)
    .option("kudu.table", table)

  private def read(session: SparkSession) = prepareReader { session.read }.kudu

  private def addLimit(data: DataFrame) = limit match {
    case Some(value) => data.limit(value)
    case None        => data
  }

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data    <- Try { read(session) }
    limited <- Try { addLimit(data) }
    _       <- Try { write(limited) }
  } yield ()

  def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }

}

object KuduExportJob {

  def create(table: String,
             master: String,
             outputPath: String,
             outputFormat: FileDataFormat,
             extraParams: Map[String, String] = Map.empty[String, String],
             limit: Option[Int]) = new KuduExportJob(
    table,
    master,
    FileExportInfo(outputPath, outputFormat),
    extraParams,
    limit
  )

} 
Example 100
Source File: TestCatalogClient.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import java.io.FileNotFoundException

import daf.catalogmanager._

import scala.util.{ Failure, Success }

trait TestCatalogClient { this: DatasetController =>

  override protected val catalogClient = new TestCatalogManagerClient

}

sealed class TestCatalogManagerClient extends CatalogManagerClient("") {

  private def makeCatalog(id: String) = MetaCatalog(
    dataschema  = DatasetCatalog(
      avro = null,
      flatSchema = List.empty,
      kyloSchema = None
    ),
    operational = Operational(
      theme              = "",
      subtheme           = "",
      logical_uri        = id,
      physical_uri       = Some { s"test-dir/$id" },
      is_std             = true,
      group_own          = "test",
      group_access       = None,
      std_schema         = None,
      read_type          = "",
      georef             = None,
      input_src          = null,
      ingestion_pipeline = None,
      storage_info       = Some {
        StorageInfo(
          hdfs = Some { StorageHdfs(s"test-dir/$id", Some(s"test-dir/$id"), None) },
          kudu = None,
          hbase = None,
          textdb = None,
          mongo = None
        )
      },
      dataset_type       = ""
    ),
    dcatapit = null
  )

  override def getById(authorization: String, catalogId: String) = catalogId match {
    case "path/to/failure" => Failure { new FileNotFoundException("Encountered failure condition") }
    case other             => Success { makeCatalog(other) }
  }

} 
Example 101
Source File: HDFSControllerSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package controllers

import daf.util.HDFSBase

import scala.util.Failure

class HDFSControllerSpec extends HDFSBase {

  var HDFSController: HDFSController = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    HDFSController = new HDFSController(sparkSession)
  }

  "A HDFS controller" should "get a dataset from hdfs when a path exists" in {

    val dfParquet = HDFSController.readData(pathParquet, "parquet", None)
    val dfAvro = HDFSController.readData(pathAvro, "avro", None)
    val dfCsv = HDFSController.readData(pathCsv, "csv", None)

    dfParquet shouldBe 'Success
    dfParquet.get.count() should be > 0L
    dfParquet.foreach(_.show())

    dfAvro shouldBe 'Success
    dfAvro.get.count() should be > 0L
    dfAvro.foreach(_.show())

    dfCsv shouldBe 'Success
    dfCsv.get.count() should be > 0L
    dfCsv.foreach(_.show())
  }

  it should "handle requests with wrong paths"in {
    val df = HDFSController.readData(s"wrongPath/test.parquet", "parquet", None)
    df shouldBe 'Failure
  }

  it should "handle requests with wrong format" in {
    val df = HDFSController.readData(s"wrongPath/test.parquet", "wrongFormat", None)
    df shouldBe 'Failure
    df.failed.get shouldBe an[IllegalArgumentException]
    df.failed.get.getMessage shouldBe "Unsupported format [wrongFormat]"
  }

} 
Example 102
Source File: HDFSBase.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package daf.util

import better.files.{ File, _ }
import daf.util.DataFrameClasses.{ Address, Person }
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hdfs.{ HdfsConfiguration, MiniDFSCluster }
import org.apache.hadoop.test.PathUtils
import org.apache.spark.sql.{ SaveMode, SparkSession }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import org.slf4j.LoggerFactory

import scala.util.{ Failure, Random, Try }

abstract class HDFSBase extends FlatSpec with Matchers with BeforeAndAfterAll {

  var miniCluster: Try[MiniDFSCluster] = Failure[MiniDFSCluster](new Exception)

  var fileSystem: Try[FileSystem] = Failure[FileSystem](new Exception)

  val sparkSession: SparkSession = SparkSession.builder().master("local").getOrCreate()

  val alogger = LoggerFactory.getLogger(this.getClass)

  val (testDataPath, confPath) = {
    val testDataPath = s"${PathUtils.getTestDir(this.getClass).getCanonicalPath}/MiniCluster"
    val confPath = s"$testDataPath/conf"
    (
      testDataPath.toFile.createIfNotExists(asDirectory = true, createParents = false),
      confPath.toFile.createIfNotExists(asDirectory = true, createParents = false)
    )
  }

  def pathAvro = "opendata/test.avro"
  def pathParquet = "opendata/test.parquet"
  def pathCsv = "opendata/test.csv"

  def getSparkSession = sparkSession

  override def beforeAll(): Unit = {

    val conf = new HdfsConfiguration()
    conf.setBoolean("dfs.permissions", true)
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA)

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath.pathAsString)
    //FileUtil.fullyDelete(testDataPath.toJava)

    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.groups", "*")
    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.hosts", "*")

    val builder = new MiniDFSCluster.Builder(conf)
    miniCluster = Try(builder.build())
    fileSystem = miniCluster.map(_.getFileSystem)
    fileSystem.foreach(fs => {
      val confFile: File = confPath / "hdfs-site.xml"
      for { os <- confFile.newOutputStream.autoClosed } fs.getConf.writeXml(os)
    })

    writeDf()
  }

  override def afterAll(): Unit = {
    miniCluster.foreach(_.shutdown(true))
    val _ = testDataPath.parent.parent.delete(true)
    sparkSession.stop()
  }

  
  private def writeDf(): Unit = {
    import sparkSession.implicits._

    alogger.info(s"TestDataPath ${testDataPath.toJava.getAbsolutePath}")
    alogger.info(s"ConfPath ${confPath.toJava.getAbsolutePath}")
    val persons = (1 to 10).map(i => Person(s"Andy$i", Random.nextInt(85), Address("Via Ciccio Cappuccio")))
    val caseClassDS = persons.toDS()
    caseClassDS.write.format("parquet").mode(SaveMode.Overwrite).save(pathParquet)
    caseClassDS.write.format("com.databricks.spark.avro").mode(SaveMode.Overwrite).save(pathAvro)
    //writing directly the Person dataframe generates an exception
    caseClassDS.toDF.select("name", "age").write.format("csv").mode(SaveMode.Overwrite).option("header", "true").save(pathCsv)
  }
}

object DataFrameClasses {

  final case class Address(street: String)

  final case class Person(name: String, age: Int, address: Address)
} 
Example 103
Source File: ServiceSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License
import java.io.{File, FileNotFoundException, IOException}
import java.net.ServerSocket
import java.util.Base64

import it.gov.daf.entitymanager.Entity
import it.gov.daf.entitymanager.client.Entity_managerClient
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.ahc.AhcWSClient
import play.api.test.WithServer

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Random, Try}

@SuppressWarnings(
  Array(
    "org.wartremover.warts.NonUnitStatements",
    "org.wartremover.warts.Throw",
    "org.wartremover.warts.Var"
  )
)
class ServiceSpec extends Specification with BeforeAfterAll {

  def getAvailablePort: Int = {
    try {
      val socket = new ServerSocket(0)
      try {
        socket.getLocalPort
      } finally {
        socket.close()
      }
    } catch {
      case e: IOException =>
        throw new IllegalStateException(s"Cannot find available port: ${e.getMessage}", e)
    }
  }

  private def constructTempDir(dirPrefix: String): Try[File] = Try {
    val rndrange = 10000000
    val file = new File(System.getProperty("java.io.tmpdir"), s"$dirPrefix${Random.nextInt(rndrange)}")
    if (!file.mkdirs())
      throw new RuntimeException("could not create temp directory: " + file.getAbsolutePath)
    file.deleteOnExit()
    file
  }

  private def deleteDirectory(path: File): Boolean = {
    if (!path.exists()) {
      throw new FileNotFoundException(path.getAbsolutePath)
    }
    var ret = true
    if (path.isDirectory)
      path.listFiles().foreach(f => ret = ret && deleteDirectory(f))
    ret && path.delete()
  }

  var tmpDir: Try[File] = Failure[File](new Exception(""))
  
  def application: Application = GuiceApplicationBuilder().
    configure("pac4j.authenticator" -> "test").
    configure("janusgraph.storage.directory" -> s"${tmpDir.map(_.getCanonicalPath).getOrElse("db")}/berkeleyje").
    configure("janusgraph.index.search.directory" -> s"${tmpDir.map(_.getCanonicalPath).getOrElse("db")}/lucene").
    build()

  "The entity_manager" should {
    "create an entity and retrieve it correctly" in new WithServer(app = application, port = getAvailablePort) {

      val ws: AhcWSClient = AhcWSClient()

      val plainCreds = "david:david"
      val plainCredsBytes = plainCreds.getBytes
      val base64CredsBytes = Base64.getEncoder.encode(plainCredsBytes)
      val base64Creds = new String(base64CredsBytes)

      val client = new Entity_managerClient(ws)(s"http://localhost:$port")

      val result = Await.result(client.createEntity(s"Basic $base64Creds", Entity("DAVID")), Duration.Inf)
      val entity = Await.result(client.getEntity(s"Basic $base64Creds", "DAVID"), Duration.Inf)

      entity must beEqualTo(Entity("DAVID"))
    }
  }

  override def beforeAll(): Unit = tmpDir = constructTempDir("test")

  override def afterAll(): Unit = tmpDir.foreach(deleteDirectory(_))
} 
Example 104
Source File: Iot_managerSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License
import java.io.{IOException, File => JFile}
import java.net.ServerSocket
import java.util.Base64

import better.files._
import it.gov.daf.iotmanager.client.Iot_managerClient
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.ahc.AhcWSClient
import play.api.libs.ws.{WSAuthScheme, WSResponse}
import play.api.test.{WithServer, WsTestClient}

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.util.{Failure, Try}

import org.apache.solr.client.solrj.embedded.JettyConfig
import org.apache.solr.client.solrj.embedded.JettySolrRunner
import org.eclipse.jetty.servlet.ServletHolder


@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements", "org.wartremover.warts.Throw"))
class Iot_managerSpec extends Specification with BeforeAfterAll {

  import Iot_managerSpec._

  def getAvailablePort: Int = {
    try {
      val socket = new ServerSocket(0)
      try {
        socket.getLocalPort
      } finally {
        socket.close()
      }
    } catch {
      case e: IOException =>
        throw new IllegalStateException(s"Cannot find available port: ${e.getMessage}", e)
    }
  }

  def application: Application = GuiceApplicationBuilder().
    configure("hadoop_conf_dir" -> s"${ServiceSpec.confPath.pathAsString}").
    configure("pac4j.authenticator" -> "test").
    build()

  "The security_manager" should {
    "manage user tokens correctly" in new WithServer(app = application, port = getAvailablePort) {
      print("ciao ciao")


    }
  }

  // reconstructed: the scraped source had decompiled `new Nothing(...)` placeholders
  // here; the types are taken from the imports above, and jettySolr is kept as a var
  // so afterAll can stop the server
  private var jettySolr: JettySolrRunner = _

  override def beforeAll(): Unit = {
    val solrXml = new JFile("/solr/home/solr.xml")
    val solrHomeDir = solrXml.getParentFile

    val port = 8080
    val context = "/solr"
    // use org.apache.solr.client.solrj.embedded.JettySolrRunner
    jettySolr = new JettySolrRunner(solrHomeDir.getAbsolutePath, context, port)

    val waitUntilTheSolrWebAppHasStarted = true
    jettySolr.start(waitUntilTheSolrWebAppHasStarted)
  }

  override def afterAll(): Unit = {
    jettySolr.stop()
  }
}

@SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.Null"))
object Iot_managerSpec {



} 
Example 105
Source File: UriDataset.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package it.gov.daf.catalogmanager.utilities.uri

import catalog_manager.yaml.MetaCatalog
import com.typesafe.config.ConfigFactory
import it.gov.daf.catalogmanager.utilities.datastructures.DatasetType
import play.api.Logger

import scala.util.{Failure, Success, Try}

case class UriDataset(
                       domain: String = "NO_DOMAIN",
                       typeDs: DatasetType.Value = DatasetType.RAW,
                       groupOwn: String = "NO_groupOwn",
                       owner: String = "NO_owner",
                       theme: String = "NO_theme",
                       subtheme :String = "NO_theme",
                       nameDs: String = "NO_nameDs") {

  val config = ConfigFactory.load()

  def getUri(): String = {
    domain + "://" + "dataset/" + typeDs + "/" + groupOwn + "/" + owner + "/" + theme + "/" + subtheme + "/" + nameDs
  }


  def getUrl(): String = {

    val basePath = config.getString("Inj-properties.hdfsBasePath")
    val baseDataPath = config.getString("Inj-properties.dataBasePath")
    typeDs match {
      case DatasetType.STANDARD => basePath + baseDataPath + "/" + typeDs + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.ORDINARY => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.RAW => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case _ => "-1"
    }
  }
}


object UriDataset  {
  def apply(uri: String): UriDataset = {
    Try {
      val uri2split = uri.split("://")
      val uriParts = uri2split(1).split("/")
      new UriDataset(
        domain = uri2split(0),
        typeDs = DatasetType.withNameOpt(uriParts(1)).get,
        groupOwn = uriParts(2),
        owner = uriParts(3),
        theme = uriParts(4),
        subtheme = uriParts(5),
        nameDs = uriParts(6))
    } match {
      case Success(s) => s
      case Failure(err) =>
        Logger.error("Error while creating uri: " + uri + " - " + err.getMessage)
        UriDataset()
    }

  }

  def convertToUriDataset(schema: MetaCatalog): UriDataset =  {

      val typeDs = if (schema.operational.is_std)
        DatasetType.STANDARD
      else
        DatasetType.ORDINARY
      new UriDataset(
        domain = "daf",
        typeDs = typeDs,
        groupOwn = schema.operational.group_own,
        owner = schema.dcatapit.owner_org.get,
        theme  = schema.operational.theme,
        subtheme = schema.operational.subtheme,
        nameDs = schema.dataschema.avro.name
      )

  }

} 
Example 106
Source File: HDFSMiniCluster.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package it.teamdigitale.miniclusters

import better.files._
import org.apache.logging.log4j.LogManager
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hdfs.{HdfsConfiguration, MiniDFSCluster}
import org.apache.hadoop.test.PathUtils

import scala.util.{Failure, Try}

class HDFSMiniCluster extends AutoCloseable {
  val alogger = LogManager.getLogger(this.getClass)

  var hdfsCluster: Try[MiniDFSCluster] = Failure[MiniDFSCluster](new Exception)
  var fileSystem: Try[FileSystem] = Failure[FileSystem](new Exception)

  val (testDataPath, confPath) = {
    val testDataPath = s"${PathUtils.getTestDir(classOf[HDFSMiniCluster]).getCanonicalPath}/MiniCluster"
    val confPath = s"$testDataPath/conf"
    (
      testDataPath.toFile.createIfNotExists(asDirectory = true, createParents = false),
      confPath.toFile.createIfNotExists(asDirectory = true, createParents = false)
    )
  }


  def start(): Unit = {

    alogger.info("Starting HDFS mini cluster")
    val conf = new HdfsConfiguration()
    conf.setBoolean("dfs.permissions", true)
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA)

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath.pathAsString)
    //FileUtil.fullyDelete(testDataPath.toJava)

    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.groups", "*")
    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.hosts", "*")

    val builder = new MiniDFSCluster.Builder(conf)
    hdfsCluster = Try(builder.build())
    fileSystem = hdfsCluster.map(_.getFileSystem)
    fileSystem.foreach(fs => {
      val confFile: File = confPath / "hdfs-site.xml"
      for {os <- confFile.newOutputStream.autoClosed} fs.getConf.writeXml(os)
    })

  }


  override def close() = {
    alogger.info("Stopping HDFS mini cluster")
    hdfsCluster.foreach(_.shutdown(true))
    val _ = testDataPath.parent.parent.delete(true)
  }
} 
Example 107
Source File: KuduEventsHandlerSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License
package it.teamdigitale.storage

import java.io.File
import java.util.concurrent.TimeUnit

import org.apache.kudu.spark.kudu._
import it.teamdigitale.miniclusters.KuduMiniCluster
import it.teamdigitale.config.IotIngestionManagerConfig.KuduConfig
import it.teamdigitale.managers.IotIngestionManager
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import it.gov.daf.iotingestion.event.Event
import it.teamdigitale.EventModel.{EventToKuduEvent, EventToStorableEvent}
import org.apache.logging.log4j.LogManager

import scala.util.{Failure, Success, Try}

class KuduEventsHandlerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  val logger = LogManager.getLogger(this.getClass)
  val kuduCluster = new KuduMiniCluster()

  val metrics: Seq[Try[Event]] = Range(0,100).map(x => Success( Event(
    version = 1L,
    id = x + "metric",
    ts = System.currentTimeMillis() + x ,
    event_type_id = 0,
    location = "41.1260529:16.8692905",
    source = "http://domain/sensor/url",
    body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
    event_subtype_id = Some("Via Cernaia(TO)"),
    attributes = Map("value" -> x.toString)
  )))

  val rdd = kuduCluster.sparkSession.sparkContext.parallelize(metrics)


  "KuduEventsHandler" should "store correctly data" in {

   val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se)).flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEvents", 2)

    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  "KuduEventsHandler" should "handle redundant data" in {

    val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se))
      .flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEventsDuplicate", 2)
    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)

    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  override def beforeAll() {
    kuduCluster.start()
  }

  override def afterAll() {
    // release the cluster when the suite finishes (assumes KuduMiniCluster is
    // AutoCloseable like the HDFSMiniCluster in the same package above)
    kuduCluster.close()
  }

} 
Example 108
Source File: SonarFileSystem.scala    From sonar-scala   with GNU Lesser General Public License v3.0
package com.mwz.sonar.scala
package util
package syntax

import java.io.File
import java.nio.file.Path

import scala.util.{Failure, Success, Try}

import cats.syntax.flatMap._
import cats.{Monad, MonoidK}
import org.sonar.api.batch.fs.FileSystem

object SonarFileSystem {
  implicit final class FileSystemOps(private val fs: FileSystem) extends AnyVal {

    
    def resolve[F[_]: Monad: MonoidK](toResolve: F[Path]): F[File] =
      toResolve.flatMap[File] { path =>
        Try(fs.resolvePath(path.toString)) match {
          case Failure(_) => MonoidK[F].empty
          case Success(f) => Monad[F].pure(f)
        }
      }
  }
} 
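
Given a FileSystem in scope, resolve works for any F with Monad and MonoidK instances; with F = List, unresolvable paths are simply dropped. A hedged sketch (the fs value is assumed to exist):

import java.nio.file.Paths

import cats.instances.list._
import com.mwz.sonar.scala.util.syntax.SonarFileSystem._

// assuming fs: org.sonar.api.batch.fs.FileSystem is available:
// fs.resolve(List(Paths.get("src/main/scala"), Paths.get("does/not/exist"))): List[java.io.File]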
Example 109
Source File: PlayParSeq.scala    From play-parseq   with Apache License 2.0
package com.linkedin.playparseq.s

import com.linkedin.parseq.{Engine, Task}
import com.linkedin.parseq.promise.Promises
import com.linkedin.playparseq.s.PlayParSeqImplicits._
import com.linkedin.playparseq.s.stores.ParSeqTaskStore
import com.linkedin.playparseq.utils.PlayParSeqHelper
import javax.inject.{Inject, Singleton}
import play.api.mvc.RequestHeader
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


// The enclosing trait/class declarations were stripped along with the doc comments in
// this scrape; the class header below is a reconstruction inferred from the members used.
@Singleton
class PlayParSeqImpl @Inject() (engine: Engine, parSeqTaskStore: ParSeqTaskStore) extends PlayParSeqHelper with PlayParSeq {

  override def runTask[T](task: Task[T])(implicit requestHeader: RequestHeader): Future[T] = {
    // Bind a Future to the ParSeq Task
    val future: Future[T] = bindTaskToFuture(task)
    // Put the ParSeq Task into store
    parSeqTaskStore.put(task)
    // Run the ParSeq Task
    engine.run(task)
    // Return the Future
    future
  }

} 
Example 110
Source File: Effects.scala    From monadless   with Apache License 2.0
package io.monadless.lst

import scala.util.Failure
import scala.util.Try
import scala.util.Success
import scala.concurrent.Future
import scala.concurrent.Promise

object Effects {

  val optionEffect = new SyncEffect[Option] {
    def point[T](v: T) = Some(v)
    def lift[T](v: => T) = Option(v)
    def apply[T](o: Option[T]) =
      o match {
        case Some(v) => Sync(Left(v))
        case None    => Sync(Right(None))
      }
  }

  val tryEffect = new SyncEffect[Try] {
    def point[T](v: T) = Success(v)
    def lift[T](v: => T) = Try(v)
    def apply[T](o: Try[T]) =
      o match {
        case Success(v)  => Sync(Left(v))
        case Failure(ex) => Sync(Right(Failure(ex)))
      }
  }

  val futureEffect = new AsyncEffect[Future] {
    import scala.concurrent.ExecutionContext.Implicits.global
    def point[T](v: T) = Future.successful(v)
    def lift[T](v: => T) = Future(v)
    def async[T](r: Async[Future[T]]): Future[T] = {
      val p = Promise[T]()
      r.cb(p.completeWith(_))
      p.future
    }
    def apply[T](o: Future[T]) =
      Async { f =>
        o.onComplete {
          case Success(v)  => f(Left(v))
          case Failure(ex) => f(Right(Future.failed(ex)))
        }
      }
  }

} 
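
To make the convention above concrete: Left carries a value to continue with, while Right short-circuits with the already-failed effect. A small sketch of what the sync effects return, assuming the Sync wrapper from io.monadless.lst (not shown in this listing):

val boom = new Exception("boom")

Effects.optionEffect(Option(1))  // Sync(Left(1))              -> continue with 1
Effects.optionEffect(None)       // Sync(Right(None))          -> short-circuit
Effects.tryEffect(Success(2))    // Sync(Left(2))              -> continue with 2
Effects.tryEffect(Failure(boom)) // Sync(Right(Failure(boom))) -> short-circuit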
Example 111
Source File: _03_TryWithPatternMatching.scala    From LearningScala   with Apache License 2.0 5 votes vote down vote up
package _090_failure_handling

import scala.util.{Failure, Success, Try}

object _03_TryWithPatternMatching {
  def convertToInt(s: String): Try[Int] = Try(s.toInt)

  val printMyInteger: Try[_] => Unit = {
    case Success(n) => println(n)
    case Failure(ex) => println(ex)
  }

  def main(args: Array[String]): Unit = {
    val a = convertToInt("123")
    val b = convertToInt("1a2b3")
    val list = List("12", "x", "23", "14", "y", "18")

    printMyInteger(a)
    printMyInteger(b)

    println(list.map(convertToInt))
  }
} 
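
Running main above prints output along these lines (exception rendering may vary slightly by JVM):

123
java.lang.NumberFormatException: For input string: "1a2b3"
List(Success(12), Failure(java.lang.NumberFormatException: For input string: "x"), Success(23), Success(14), Failure(java.lang.NumberFormatException: For input string: "y"), Success(18))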
Example 112
Source File: DynamoActor.scala    From scala-spark-cab-rides-predictions   with MIT License 5 votes vote down vote up
package actors

import akka.actor.{Actor, ActorLogging, Status}
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
import dynamodb.{CabImpl, WeatherImp}
import models.{CabPriceBatch, WeatherBatch}

import scala.concurrent.Future
import scala.util.{Failure, Success}


// Enclosing actor declaration elided in this listing; the assumed shape is an
// actor whose receive delegates to putCabPrices (receive omitted here),
// with ActorLogging supplying `log`:
class DynamoActor extends Actor with ActorLogging {

  import scala.concurrent.ExecutionContext.Implicits.global // for Future#onComplete below

  def putCabPrices(cabPriceBatch: CabPriceBatch): Unit = {
    val cabPrices = cabPriceBatch.cabPrices.toSeq
    log.info("received " + cabPrices.size + " number of cab price records")
    val result: Future[Seq[BatchWriteItemResult]] = CabImpl.put(cabPrices)
    result onComplete {
      case Success(_) => log.info("Cab Prices Batch processed on DynamoDB")
      case Failure(exception) => log.error(exception, "error processing Cab Prices batch on DynamoDB")
    }
  }
} 
Example 113
Source File: Director.scala    From nescala   with GNU General Public License v2.0 5 votes vote down vote up
package com.owlandrews.nescala.ui

import java.awt.Canvas

import com.owlandrews.nescala.BuildInfo
import org.lwjgl.opengl.{Display, GL11}

import scala.swing.Dialog
import scala.util.{Failure, Success, Try}

case class Director(gameWindow : Canvas, menuWindow: WrapPanel) {
  
  private var view : Option[View] = None
  private var pause = false

  def Menu() = setView(Some(MenuView(menuWindow)))

  def Reset() = view.foreach(_.Reset())

  def Close() = setView(None)

  def Pause() = pause = true

  def Resume() = pause = false

  def Save() = view.foreach(_.Save())

  def Start(path: String) = loadGameView(path) match {
            case Success(_) => run()
            case Failure(e) => Dialog.showMessage(new {def peer = gameWindow.getParent}, e.getMessage, BuildInfo.name, Dialog.Message.Warning)
  }

  private def loadGameView(path: String) = Try(com.owlandrews.nescala.Console(path)).map(console => setView(Some(GameView(console, gameWindow))))

  private def setView(view : Option[View]) {
    this.view.foreach(_.Close())
    this.view = view
    this.view.foreach(_.Open())
  }

  private def step(ts:Long) = {
    // Clear the screen and depth buffer
    GL11.glClear(GL11.GL_COLOR_BUFFER_BIT)

    val dt = System.nanoTime() - ts
    val nextTimeStamp = System.nanoTime()
    val seconds = dt / 1000000000.0F
    view.foreach(_.Update(seconds))
    nextTimeStamp
  }

  private def run() = {
    var timestamp = System.nanoTime()
    while (view.isDefined) {
      if (!pause) timestamp = step(timestamp)
      Display.update()
    }
    Display.destroy()
  }
} 
Example 114
Source File: LogCollector.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.logcollector

import java.io.FileInputStream
import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import com.typesafe.scalalogging.LazyLogging
import io.phdata.pulse.common.SolrService
import io.phdata.pulse.solr.SolrProvider
import org.apache.kudu.client.KuduClient.KuduClientBuilder

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }


// Enclosing object declaration elided in this listing; assumed from the imports
// (LazyLogging supplies `logger`):
object LogCollector extends LazyLogging {

  def main(args: Array[String]): Unit = {
    System.getProperty("java.security.auth.login.config") match {
      case null =>
        logger.info(
          "java.security.auth.login.config is not set, continuing without kerberos authentication")
      case _ =>
        KerberosContext.scheduleKerberosLogin(0, 9, TimeUnit.HOURS)
    }

    start(args)
  }

  private def start(args: Array[String]): Unit = {
    val cliParser = new LogCollectorCliParser(args)

    val solrService = SolrProvider.create(cliParser.zkHosts().split(",").toList)
    val solrStream  = new SolrCloudStream(solrService)

    val kuduClient =
      cliParser.kuduMasters.toOption.map(masters =>
        KerberosContext.runPrivileged(new KuduClientBuilder(masters).build()))

    val kuduService =
      kuduClient.map(client => KerberosContext.runPrivileged(new KuduService(client)))

    val routes = new LogCollectorRoutes(solrStream, kuduService)

    cliParser.mode() match {
      case "kafka" => {
        kafka(solrService, cliParser.kafkaProps(), cliParser.topic())
      }
      case _ => {
        http(cliParser.port(), routes)
      }
    }
  }

  // Starts Http Service
  def http(port: Int, routes: LogCollectorRoutes): Future[Unit] = {
    implicit val actorSystem: ActorSystem   = ActorSystem()
    implicit val ec                         = actorSystem.dispatchers.lookup("akka.actor.http-dispatcher")
    implicit val materializer: Materializer = ActorMaterializer.create(actorSystem)

    val httpServerFuture = Http().bindAndHandle(routes.routes, "0.0.0.0", port)(materializer) map {
      binding =>
        logger.info(s"Log Collector interface bound to: ${binding.localAddress}")
    }

    httpServerFuture.onComplete {
      case Success(v) => ()
      case Failure(ex) => {
        logger.error("HTTP server failed, exiting. ", ex)
        System.exit(1)
      }
    }

    Await.ready(
      httpServerFuture,
      Duration.Inf
    )
  }

  // Starts Kafka Consumer
  def kafka(solrService: SolrService, kafkaProps: String, topic: String): Unit = {

    val solrCloudStream = new SolrCloudStream(solrService)

    val kafkaConsumer      = new PulseKafkaConsumer(solrCloudStream)
    val kafkaConsumerProps = new Properties()

    kafkaConsumerProps.load(new FileInputStream(kafkaProps))

    kafkaConsumer.read(kafkaConsumerProps, topic)
  }
} 
Example 115
Source File: Stream.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse

import io.phdata.pulse.log.{ HttpManager, JsonParser }
import monix.reactive.subjects.ConcurrentSubject
import monix.execution.Scheduler.Implicits.global
import monix.reactive.OverflowStrategy
import org.apache.log4j.helpers.LogLog
import org.apache.log4j.spi.LoggingEvent

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success, Try }

abstract class Stream[E](flushDuration: FiniteDuration, flushSize: Int, maxBuffer: Int) {

  val overflowStrategy = OverflowStrategy.DropNewAndSignal(maxBuffer, (_: Long) => None)
  val subject          = ConcurrentSubject.publish[E](overflowStrategy)

  subject
    .bufferTimedAndCounted(flushDuration, flushSize)
    .map(save)
    .subscribe()

  def append(value: E): Unit =
    Try { subject.onNext(value) } match {
      case Success(_) => ()
      case Failure(e) => LogLog.error("Error appending to stream", e)
    }

  def save(values: Seq[E])

}

class HttpStream(flushDuration: FiniteDuration,
                 flushSize: Int,
                 maxBuffer: Int,
                 httpManager: HttpManager)
    extends Stream[LoggingEvent](flushDuration, flushSize, maxBuffer) {

  val jsonParser = new JsonParser

  override def save(values: Seq[LoggingEvent]): Unit = {
    val logArray = values.toArray
    LogLog.debug(s"Flushing ${logArray.length} messages")
    val logMessage = jsonParser.marshallArray(logArray)

    httpManager.send(logMessage)
  }
} 
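
To illustrate the buffering contract of the abstract class above, a minimal hypothetical subclass that batches strings and prints each flush:

import scala.concurrent.duration._

class PrintlnStream(flushDuration: FiniteDuration, flushSize: Int, maxBuffer: Int)
    extends Stream[String](flushDuration, flushSize, maxBuffer) {

  // Invoked with whatever accumulated within flushDuration, capped at flushSize.
  override def save(values: Seq[String]): Unit =
    println(s"flushing ${values.length} events")
}

val stream = new PrintlnStream(1.second, flushSize = 100, maxBuffer = 10000)
stream.append("event-1") // buffered, then flushed within roughly a second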
Example 116
Source File: ValidationImplicitsTest.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.collectionroller.util

import org.scalatest.FunSuite

import scala.util.{ Failure, Success, Try }

class ValidationImplicitsTest extends FunSuite {

  import ValidationImplicits._

  val testKeyword = "worked"

  val success: Try[String] = Success(testKeyword)
  val failure: Try[String] = Failure(new Exception())
  val sequence             = Seq(success, failure)

  test("map over a sequence of valid values") {
    val mapped = sequence.toValidated().mapValid(x => x.toUpperCase())
    assert(mapped.exists(x => x.exists(_ == testKeyword.toUpperCase())))
  }

  test("convert at Try into a Validated") {
    assert(Try(throw new Exception).toValidated().isInvalid)
    assert(Try(1).toValidated().isValid)
  }

  test("convert an Iterable[Try] to Iterable[Validated]") {
    assert(sequence.toValidated().exists(_.isValid))
    assert(sequence.toValidated().exists(_.isInvalid))
  }
} 
Example 117
Source File: TemplateRendering.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.journalist.utils

import liigavoitto.util.Logging

import scala.util.{Failure, Success, Try}

case class Template(template: String, weight: Double = 1.0) {
  require(weight > 0.0)
}

case class RenderedTemplate(text: String, weight: Double) {
  require(weight > 0.0)
}

object TemplateRendering extends Logging {
  def render(template: Template,
             attributes: Map[String, Any]): Option[RenderedTemplate] = {
    Try {
      RenderedTemplate(Mustache(template.template).apply(attributes), template.weight)
    } match {
      case Success(rendered) => Some(rendered)
      case Failure(e) =>
        log.warn(s"Could not render '$template': " + e.getMessage)
        None
    }
  }
} 
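
A hypothetical render call, assuming the project's Mustache implementation uses standard {{...}} placeholders:

val template = Template("{{home}} beat {{away}}!", weight = 2.0)

TemplateRendering.render(template, Map("home" -> "HIFK", "away" -> "Kärpät"))
// => Some(RenderedTemplate("HIFK beat Kärpät!", 2.0))

TemplateRendering.render(Template("{{broken"), Map.empty)
// => None if the malformed template makes Mustache throw; the failure is logged as a warning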
Example 118
Source File: TemplateLoader.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.journalist.utils

import liigavoitto.util.Logging
import scaledn.parser.parseEDN
import scaledn.{EDN, EDNKeyword, EDNSymbol}

import scala.io.Source
import scala.reflect.ClassTag
import scala.util.{Failure, Try}

trait TemplateLoader extends Logging {

  type FileContent = Map[EDNKeyword, Map[EDNKeyword, List[TemplateVector]]]
  type TemplateVector = Vector[Any]
  type TemplateSettings = Map[EDNKeyword, Any]
  val WeightKey = EDNKeyword(EDNSymbol("weight"))

  def load(filePath: String, templatesName: String, language: String) = {
    val content = loadResource(filePath)
    val parsed = parseEDN(content)
    logErrors(parsed, filePath)
    val mapped = parsed.get.asInstanceOf[FileContent]
    getTemplates(mapped, templatesName, language)
  }

  private def getTemplates(parsed: FileContent, name: String, language: String) = {
    val templatesName = EDNKeyword(EDNSymbol(name))
    val languageKey = EDNKeyword(EDNSymbol(language))
    parsed(templatesName)(languageKey).map(parseTemplate)
  }

  private def parseTemplate(vector: TemplateVector) = {
    val tmpl = vector(0).asInstanceOf[String]
    val weight = getWeight(vector)
    if (weight.isDefined)
      Template(tmpl, weight.get)
    else
      Template(tmpl)
  }

  private def asInstanceOfOption[T: ClassTag](o: Any): Option[T] =
    Some(o) collect { case m: T => m }

  private def getWeight(vector: Vector[Any]) = for {
      opts <- vector.lift(1)
      settings <- asInstanceOfOption[TemplateSettings](opts)
      value <- settings.get(WeightKey)
      asDouble <- asInstanceOfOption[Double](value)
    } yield asDouble

  private def loadResource(path: String) = {
    val resourcePath = path
    val res = getClass.getClassLoader.getResource(resourcePath)
    val source = Source.fromURL(res)
    source.mkString
  }

  private def logErrors(parsed: Try[EDN], filePath: String) = parsed match {
      case Failure(f : org.parboiled2.ParseError) => {
        log.error(s"$filePath ParseError at line " + f.position.line + " col " + f.position.column)
      }
      case _ =>
    }
} 
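
For orientation, the loader above expects an EDN resource shaped roughly as below (the file name and keywords are hypothetical); each template vector holds the template string plus an optional settings map carrying :weight:

// Hypothetical resource templates/report.edn:
//   {:report-templates
//    {:fi [["{{home}} voitti ottelun!" {:weight 2.0}]
//          ["{{home}} vei pisteet."]]}}
//
// ...loaded from a class mixing in TemplateLoader:
val templates = load("templates/report.edn", "report-templates", "fi")
// => List(Template("{{home}} voitti ottelun!", 2.0), Template("{{home}} vei pisteet.", 1.0))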
Example 119
Source File: ScoresApiParser.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.scores

import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import liigavoitto.util.{ DateTimeNoMillisSerializer, Logging }

import scala.util.{ Failure, Success, Try }

case class Data(data: List[Sport])
case class Sport(id: String, series: List[Series])
case class Series(id: String, seasons: List[Season])
case class Season(id: String, stages: List[Stage])
case class Stage(id: String, matches: Option[List[Match]],
  standing: Option[List[LeagueTableEntry]],
  playerStatistics: Option[List[PlayerStatsEntry]])

case class Match(id: String, name: String, date: DateTime, status: String, teams: List[Team], feed: List[Feed] = List(), stats: GeneralMatchStats)
case class Feed(`type`: String, gameTime: Option[String], period: Option[String], player: Option[FeedPlayer], standing: Option[String], team: Option[Team], goalType: Option[String], saves: Option[String], timeInMins: Option[String], text: Option[String], beginTime: Option[String], endTime: Option[String])
case class FeedPlayer(id: String, name: PlayerName, meta: Option[PlayerMeta])
case class Team(id: String, name: String, abbr: String, meta: Meta, score: Option[Score], players: List[Player])
case class Score(now: Int, periods: List[Period], outcome: Outcome)
case class Period(id: String, score: Int)
case class Meta(images: List[Image], directives: Option[Map[String, Any]])
case class Image(id: String, imageType: String)
case class Outcome(wins: Int, draws: Int, losses: Int, otWins: Int, otLosses: Int)

case class Player(id: String, name: PlayerName, position: Option[String], specific: Map[String, Any], meta: Option[PlayerMeta])
case class PlayerName(first: String, last: String)
case class PlayerMeta(gender: Option[String] = None, country: Option[String] = None, tags: List[String] = Nil, directives: Map[String, String] = Map.empty)

case class LeagueTableEntry(
  team: Team,
  home: Option[LeagueTableResult] = None,
  away: Option[LeagueTableResult] = None,
  total: Option[LeagueTableResult] = None,
  specific: Option[Map[String, Any]] = None
)
case class LeagueTableResult(gamesPlayed: Int, outcome: Outcome, goals: Goals, points: Option[Int] = None, specific: Option[Map[String, Any]] = None)
case class Goals(score: Int, conceded: Int)

case class PlayerStatsEntry(player: PlayerStatsPlayer, teamStats: List[PlayerTeamStatsEntry])
case class PlayerTeamStatsEntry(team: Team, points: Int, goals: Int, assists: Int)
case class PlayerStatsPlayer(id: String, name: PlayerName)

case class GeneralMatchStats(attendance: Int)

trait ScoresApiParser extends Logging {
  implicit val formats = DefaultFormats + DateTimeNoMillisSerializer

  def parseMatchSport(json: String): Option[Sport] = extractData(json).map(_.head)
  def parseMatchList(json: String): Option[List[Match]] = extractData(json) match {
    case Some(sports) => if (sports.nonEmpty) Some(extractMatchesFromSport(sports.head).get) else Some(List())
    case None => None
  }
  def parseLeagueTable(json: String): Option[List[LeagueTableEntry]] = {
    extractData(json) match {
      case Some(sports) => if (sports.nonEmpty) Some(extractLeagueTableFromSport(sports.head).get) else Some(List())
      case None => None
    }
  }
  def parsePlayerStats(json: String): Option[List[PlayerStatsEntry]] = {
    extractData(json) match {
      case Some(sports) =>
        if (sports.nonEmpty)
          Some(extractPlayerStatsFromSport(sports.head).get)
        else Some(List())
      case None => None
    }
  }

  protected def extractMatchesFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.matches
  protected def extractLeagueTableFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.standing
  protected def extractPlayerStatsFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.playerStatistics
  protected def extractData(json: String) = {
    Try {
      log.debug(s"Sport JSON: $json")
      parse(json).extract[Data]
    } match {
      case Success(s) => Some(s.data)
      case Failure(e) =>
        log.info(s"Failed to parse '$json': " + e)
        None
    }
  }
} 
Example 120
Source File: ScoresApiSupport.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.scores

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import liigavoitto.util.Logging
import org.joda.time.format.DateTimeFormat

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{ Failure, Properties, Success, Try }

trait ScoresApiSupport extends Logging {
  implicit val system: ActorSystem
  implicit val ec = system.dispatcher
  implicit val fm = ActorMaterializer()

  val oneHundredMegabytes = 100000000

  val apiUrl = Properties.envOrElse("SCORES_API_URL", "http://scores.api.yle.fi/v0/")
  val scoresAuth = Map[String, String](
    "app_id" -> Properties.envOrElse("SCORES_API_APP_ID", ""),
    "app_key" -> Properties.envOrElse("SCORES_API_APP_KEY", "")
  )
  val dateFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss")
  val timeout = 15.seconds

  protected def get(url: String) = {
    Try {
      val request = HttpRequest(GET, url)
      log.info("REQUEST: " + request)
      Http().singleRequest(request).map(r => getStr(r))
    } match {
      case Success(s) => s
      case Failure(e) =>
        log.warn(s"Failed to get $url: " + e.getMessage)
        e.printStackTrace()
        throw new RuntimeException("Failure: " + e)
    }
  }

  protected def getStr(r: HttpResponse) = {
    Try {
      val entity = Await.result(r.entity.withSizeLimit(oneHundredMegabytes).toStrict(timeout), timeout)
      entity.data.decodeString("UTF-8")
    } match {
      case Success(s) => s
      case Failure(e) => throw new RuntimeException(s"Scores api failure: " + e.getMessage)
    }
  }
} 
Example 121
Source File: package.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.actor

import grizzled.slf4j.Logging
import kafka.manager.features.{ClusterFeature, ClusterFeatures}
import org.apache.kafka.common.KafkaFuture.BiConsumer

import scala.util.{Failure, Try}


package object cluster {
  implicit class TryLogErrorHelper[T](t: Try[T]) extends Logging {
    def logError(s: => String) : Try[T] = {
      t match {
        case Failure(e) =>
          error(s, e)
        case _ => //do nothing
      }
      t
    }
  }

  implicit def toBiConsumer[A,B](fn: (A, B) => Unit): BiConsumer[A, B] = {
    new BiConsumer[A, B] {
      override def accept(a: A, b: B): Unit = {
        fn(a, b)
      }
    }
  }

  def featureGate[T](af: ClusterFeature)(fn: => Unit)(implicit features: ClusterFeatures) : Unit = {
    if(features.features(af)) {
      fn
    } else {
      //do nothing
    }
  }
  def featureGate[T](af: ClusterFeature, af2: ClusterFeature)(fn: => Unit)(implicit features: ClusterFeatures) : Unit = {
    if(features.features(af) && features.features(af2)) {
      fn
    } else {
      //do nothing
    }
  }
  def featureGate[T](af: ClusterFeature, af2: ClusterFeature, af3: ClusterFeature)(fn: => Unit)(implicit features: ClusterFeatures) : Unit = {
    if(features.features(af) && features.features(af2) && features.features(af3)) {
      fn
    } else {
      //do nothing
    }
  }
  def featureGateFold[T](af: ClusterFeature)(elseFn: => T, fn: => T)(implicit features: ClusterFeatures) : T = {
    if(features.features(af)) {
      fn
    } else {
      elseFn
    }
  }
} 
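
A short usage sketch of the helpers above (the feature set here is an assumption): logError keeps the Try chain intact while logging failures, and featureGate runs its block only when the feature is enabled:

import kafka.manager.actor.cluster._
import kafka.manager.features.{ClusterFeatures, KMLogKafkaFeature}

import scala.util.Try

implicit val features: ClusterFeatures = ClusterFeatures.default

val brokerId: Try[Int] = Try("not-a-number".toInt).logError("Could not parse broker id")

featureGate(KMLogKafkaFeature) {
  // runs only for clusters configured with logkafkaEnabled
}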
Example 122
Source File: KMFeature.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.features

import grizzled.slf4j.Logging
import kafka.manager.model.{Kafka_0_8_1_1, ClusterConfig}

import scala.collection.mutable.ListBuffer
import scala.util.{Success, Failure, Try}



trait KMFeature

sealed trait ClusterFeature extends KMFeature

case object KMLogKafkaFeature extends ClusterFeature
case object KMDeleteTopicFeature extends ClusterFeature
case object KMJMXMetricsFeature extends ClusterFeature
case object KMDisplaySizeFeature extends ClusterFeature
case object KMPollConsumersFeature extends ClusterFeature

object ClusterFeature extends Logging {
  import scala.reflect.runtime.universe

  val runtimeMirror = universe.runtimeMirror(getClass.getClassLoader)

  def from(s: String) : Option[ClusterFeature] = {
    Try {
          val clazz = s"features.$s"
          val module = runtimeMirror.staticModule(clazz)
          val obj = runtimeMirror.reflectModule(module)
          obj.instance match {
            case f: ClusterFeature =>
              f
            case _ =>
              throw new IllegalArgumentException(s"Unknown application feature $s")
          }
        } match {
      case Failure(t) =>
        error(s"Unknown application feature $s")
        None
      case Success(f) => Option(f)
    }
  }

}

case class ClusterFeatures(features: Set[ClusterFeature])

object ClusterFeatures {
  val default = ClusterFeatures(Set())
  
  def from(clusterConfig: ClusterConfig) : ClusterFeatures = {
    val buffer = new ListBuffer[ClusterFeature]
    
    if(clusterConfig.logkafkaEnabled)
      buffer+=KMLogKafkaFeature

    if(clusterConfig.jmxEnabled)
      buffer+=KMJMXMetricsFeature

    if(clusterConfig.displaySizeEnabled)
      buffer+=KMDisplaySizeFeature
    
    if(clusterConfig.version != Kafka_0_8_1_1)
      buffer+=KMDeleteTopicFeature

    if(clusterConfig.pollConsumers)
      buffer+=KMPollConsumersFeature

    ClusterFeatures(buffer.toSet)
  }
} 
Example 123
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka   with MIT License 5 votes vote down vote up
package net.manub.embeddedkafka

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
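
A minimal sketch of a spec built on the trait above; the ports are assumptions (6001 is embedded-kafka's default broker port unless overridden):

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{Available, NotAvailable}

class BrokerStatusSpec extends EmbeddedKafkaSpecSupport {
  "an embedded broker" should {
    "eventually accept connections on its port" in {
      expectedServerStatus(6001, Available)
    }
  }

  "an unused port" should {
    "report as not available" in {
      expectedServerStatus(6009, NotAvailable)
    }
  }
}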
Example 124
Source File: MethodTracer.scala    From money   with Apache License 2.0 5 votes vote down vote up
package com.comcast.money.core.logging

import java.lang.reflect.Method

import com.comcast.money.annotations.{ Timed, Traced }
import com.comcast.money.core.{ Money, Tracer }
import com.comcast.money.core.async.AsyncNotifier
import com.comcast.money.core.internal.{ MDCSupport, SpanContext, SpanLocal }
import com.comcast.money.core.reflect.Reflections
import org.slf4j.MDC

import scala.util.{ Failure, Success, Try }

trait MethodTracer extends Reflections with TraceLogging {
  val tracer: Tracer = Money.Environment.tracer
  val asyncNotifier: AsyncNotifier = Money.Environment.asyncNotifier
  val mdcSupport: MDCSupport = new MDCSupport()
  val spanContext: SpanContext = SpanLocal

  def traceMethod(method: Method, annotation: Traced, args: Array[AnyRef], proceed: () => AnyRef): AnyRef = {
    val key = annotation.value()

    tracer.startSpan(key)
    recordTracedParameters(method, args, tracer)

    Try { proceed() } match {
      case Success(result) if annotation.async() =>
        traceAsyncResult(method, annotation, result) match {
          case Some(future) =>
            future
          case None =>
            tracer.stopSpan(true)
            result
        }
      case Success(result) =>
        tracer.stopSpan(true)
        result
      case Failure(exception) =>
        logException(exception)
        tracer.stopSpan(exceptionMatches(exception, annotation.ignoredExceptions()))
        throw exception
    }
  }

  def timeMethod(method: Method, annotation: Timed, proceed: () => AnyRef): AnyRef = {
    val key = annotation.value()
    try {
      tracer.startTimer(key)
      proceed()
    } finally {
      tracer.stopTimer(key)
    }
  }

  def traceAsyncResult(
    method: Method,
    annotation: Traced,
    returnValue: AnyRef): Option[AnyRef] = for {

    // resolve an async notification handler that supports the result
    handler <- asyncNotifier.resolveHandler(method.getReturnType, returnValue)

    // pop the current span from the stack as it will not be stopped by the tracer
    span <- spanContext.pop
    // capture the current MDC context to be applied on the callback thread
    mdc = Option(MDC.getCopyOfContextMap)

    result = handler.whenComplete(method.getReturnType, returnValue) { completed =>
      // reapply the MDC onto the callback thread
      mdcSupport.propogateMDC(mdc)

      // determine if the future completed successfully or exceptionally
      val result = completed match {
        case Success(_) => true
        case Failure(exception) =>
          logException(exception)
          exceptionMatches(exception, annotation.ignoredExceptions())
      }

      // stop the captured span with the success/failure flag
      span.stop(result)
      // clear the MDC from the callback thread
      MDC.clear()
    }
  } yield result
} 
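
For context, a hypothetical service carrying the money annotations that the tracer above intercepts; async = true defers stopping the span until the returned Future completes:

import scala.concurrent.Future

import com.comcast.money.annotations.{ Timed, Traced }

class UserService {

  @Traced(value = "fetch-user", async = true)
  def fetchUser(id: String): Future[String] =
    Future.successful(s"user-$id")

  @Timed("render-user")
  def renderUser(raw: String): String = raw.toUpperCase
}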
Example 125
Source File: ScalaFutureNotificationHandler.scala    From money   with Apache License 2.0 5 votes vote down vote up
package com.comcast.money.core.async

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Try }

class ScalaFutureNotificationHandler extends AbstractAsyncNotificationHandler[Future[_]] {
  implicit val executionContext: ExecutionContext = new DirectExecutionContext

  override def whenComplete(future: Future[_], f: Try[_] => Unit): Future[_] =
    future.transform(result => {
      f(Try(result))
      result
    }, throwable => {
      f(Failure(throwable))
      throwable
    })
} 
Example 126
Source File: ParquetWriterTask.scala    From gearpump-examples   with Apache License 2.0 5 votes vote down vote up
package io.gearpump.examples.kafka_hdfs_pipeline

import org.apache.avro.Schema
import io.gearpump.Message
import io.gearpump.cluster.UserConfig
import io.gearpump.examples.kafka_hdfs_pipeline.ParquetWriterTask._
import io.gearpump.streaming.task.{StartTime, Task, TaskContext}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.parquet.avro.AvroParquetWriter

import scala.util.{Failure, Success, Try}

class ParquetWriterTask(taskContext : TaskContext, config: UserConfig) extends Task(taskContext, config) {
  val outputFileName = taskContext.appName + ".parquet"
  val absolutePath = Option(getHdfs + config.getString(PARQUET_OUTPUT_DIRECTORY).getOrElse("/parquet") + "/" + outputFileName).map(deleteFile(_)).get
  val outputPath = new Path(absolutePath)
  var parquetWriter = new AvroParquetWriter[SpaceShuttleRecord](outputPath, SpaceShuttleRecord.SCHEMA$)

  def getYarnConf = new YarnConfiguration
  def getFs = FileSystem.get(getYarnConf)
  def getHdfs = new Path(getFs.getHomeDirectory, "gearpump")

  private def deleteFile(fileName: String): String = {
    val file = new Path(fileName)
    if (getFs.exists(file)) getFs.delete(file, false)
    fileName
  }

  override def onStart(startTime: StartTime): Unit = {
    LOG.info(s"ParquetWriter.onStart $absolutePath")
  }

  override def onNext(msg: Message): Unit = {
    Try({
      parquetWriter.write(msg.msg.asInstanceOf[SpaceShuttleRecord])
    }) match {
      case Success(ok) =>
      case Failure(throwable) =>
        LOG.error(s"failed ${throwable.getMessage}")
    }
  }

  override def onStop(): Unit = {
    LOG.info("ParquetWriter.onStop")
    parquetWriter.close()
  }
}

object ParquetWriterTask {
  val PARQUET_OUTPUT_DIRECTORY = "parquet.output.directory"
  val PARQUET_WRITER = "parquet.writer"
} 
Example 127
Source File: ScoringTask.scala    From gearpump-examples   with Apache License 2.0 5 votes vote down vote up
package io.gearpump.examples.kafka_hdfs_pipeline

import akka.io.IO
import akka.pattern.ask
import io.gearpump.Message
import io.gearpump.cluster.UserConfig
import io.gearpump.streaming.task.{Task, TaskContext}
import io.gearpump.util.Constants
import spray.can.Http
import spray.http.HttpMethods._
import spray.http.{HttpRequest, HttpResponse, Uri}
import upickle.default._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Failure, Success, Try}


class ScoringTask(taskContext : TaskContext, config: UserConfig) extends Task(taskContext, config) {
  implicit val timeout = Constants.FUTURE_TIMEOUT
  implicit val ec: ExecutionContext = system.dispatcher

  import taskContext.output

  override def onNext(msg: Message): Unit = {
    Try( {
      val jsonData = msg.msg.asInstanceOf[Array[Byte]]
      val jsonValue = new String(jsonData)
      val spaceShuttleMessage = read[SpaceShuttleMessage](jsonValue)
      val vector = read[Array[Float]](spaceShuttleMessage.body)
      val featureVector = vector.drop(1)
      val featureVectorString = featureVector.mkString(",")
      val url = s"http://atk-scoringengine.demo-gotapaas.com/v1/models/DemoModel/score?data=$featureVectorString"

      val result = Await.result((IO(Http) ? HttpRequest(GET, Uri(url))).asInstanceOf[Future[HttpResponse]], 5 seconds)
      val entity = result.entity.data.asString.toFloat
      entity match {
        case 1.0F =>
        case anomaly:Float =>
          output(Message(SpaceShuttleRecord(System.currentTimeMillis, anomaly), System.currentTimeMillis))
      }
    }) match {
      case Success(ok) =>
      case Failure(throwable) =>
        LOG.error(s"failed ${throwable.getMessage}")
    }

  }
} 
Example 128
Source File: PipeLineSpec.scala    From gearpump-examples   with Apache License 2.0 5 votes vote down vote up
package io.gearpump.examples.kafka_hdfs_pipeline

import akka.actor.ActorSystem
import io.gearpump._
import io.gearpump.cluster.UserConfig
import io.gearpump.streaming.task.{StartTime, Task, TaskContext}
import io.gearpump.streaming.transaction.api.TimeReplayableSource
import io.gearpump.util.LogUtil
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec}
import org.slf4j.Logger

import scala.util.{Failure, Success, Try}

class SpaceShuttleReplayableSource extends TimeReplayableSource {
  val data = Array[String](
    """
      |{"id":"2a329674-12ad-49f7-b40d-6485aae0aae8","on":"2015-04-02T18:52:02.680178753Z","body":"[-0.414141,-0.0246564,-0.125,0.0140301,-0.474359,0.0256049,-0.0980392,0.463884,0.40836]"}
    """
      .stripMargin,
    """
      |{"id":"043ade58-2fbc-4fe2-8253-84ab181b8cfa","on":"2015-04-02T18:52:02.680078434Z","body": "[-0.414141,-0.0246564,-0.125,0.0140301,-0.474359,0.0256049,-0.0980392,0.463884,0.40836]"}
    """.stripMargin,
    """
      |{"id":"043ade58-2fbc-4fe2-8253-84ab181b8cfa","on":"2015-04-02T18:52:02.680078434Z","body": "[-0.414141,-0.0246564,-0.125,0.0140301,-0.474359,0.0256049,-0.0980392,0.463884,0.40836]"}
    """.stripMargin
  )

  override def open(context: TaskContext, startTime: Option[TimeStamp]): Unit = {}

  override def read(num: Int): List[Message] = List(Message(data(0)), Message(data(1)), Message(data(2)))

  override def close(): Unit = {}
}

class SpaceShuttleProducer(taskContext : TaskContext, conf: UserConfig)
  extends Task(taskContext, conf) {

  import taskContext.{output, parallelism}

  private val batchSize = 3

  val taskParallelism = parallelism

  private val source: TimeReplayableSource = new SpaceShuttleReplayableSource()
  private var startTime: TimeStamp = 0L

  override def onStart(newStartTime: StartTime): Unit = {
    startTime = newStartTime.startTime
    LOG.info(s"start time $startTime")
    source.open(taskContext, Some(startTime))
    self ! Message("start", System.currentTimeMillis())
  }

  override def onNext(msg: Message): Unit = {
    Try({

      source.read(batchSize).foreach(msg => {
        output(msg)
      })
    }) match {
      case Success(ok) =>
      case Failure(throwable) =>
        LOG.error(s"failed ${throwable.getMessage}")
    }
    self ! Message("continue", System.currentTimeMillis())
  }

  override def onStop(): Unit = {
    LOG.info("closing kafka source...")
    source.close()
  }
}

class PipeLineSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll {
  val LOG: Logger = LogUtil.getLogger(getClass)
  implicit var system: ActorSystem = null

  override def beforeAll(): Unit = {
    system = ActorSystem("PipeLineSpec")
  }

  override def afterAll(): Unit = {
    system.shutdown()
  }

  property("PipeLineSpec should be able to create a DataSource") {
    Option(new SpaceShuttleReplayableSource) match {
      case Some(replayableSource) =>
      case None =>
        assert(false)
    }
  }
} 
Example 129
Source File: LauncherUtils.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.utils

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.models.policy.PolicyStatusModel
import com.typesafe.config.Config

import scala.util.{Failure, Success, Try}

trait LauncherUtils extends SLF4JLogging {

  def loggingResponsePolicyStatus(response: Try[PolicyStatusModel]): Unit =
    response match {
      case Success(statusModel) =>
        log.info(s"Policy status model created or updated correctly: " +
          s"\n\tId: ${statusModel.id}\n\tStatus: ${statusModel.status}")
      case Failure(e) =>
        log.error(s"Policy status model creation failure. Error: ${e.getLocalizedMessage}", e)
    }

  def getZookeeperConfig: Config = SpartaConfig.getZookeeperConfig.getOrElse {
    val message = "Impossible to extract Zookeeper Configuration"
    log.error(message)
    throw new RuntimeException(message)
  }
} 
Example 130
Source File: PolicyConfigUtils.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.utils

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.models.policy.PolicyModel
import com.typesafe.config.Config

import scala.util.{Failure, Success, Try}

trait PolicyConfigUtils extends SLF4JLogging {

  val DetailConfig = SpartaConfig.getDetailConfig.getOrElse {
    val message = "Impossible to extract Detail Configuration"
    log.error(message)
    throw new RuntimeException(message)
  }

  def isExecutionType(policy: PolicyModel, executionType: String): Boolean =
    policy.executionMode match {
      case Some(executionMode) if executionMode.nonEmpty => executionMode.equalsIgnoreCase(executionType)
      case _ => DetailConfig.getString(ExecutionMode).equalsIgnoreCase(executionType)
    }

  def isCluster(policy: PolicyModel, clusterConfig: Config): Boolean =
    policy.sparkConf.find(sparkProp =>
      sparkProp.sparkConfKey == DeployMode && sparkProp.sparkConfValue == ClusterValue) match {
      case Some(mode) => true
      case _ => Try(clusterConfig.getString(DeployMode)) match {
        case Success(mode) => mode == ClusterValue
        case Failure(e) => false
      }
    }

  def getDetailExecutionMode(policy: PolicyModel, clusterConfig: Config): String =
    if (isExecutionType(policy, AppConstant.ConfigLocal)) LocalValue
    else {
      val execMode = executionMode(policy)
      if (isCluster(policy, clusterConfig)) s"$execMode-$ClusterValue"
      else s"$execMode-$ClientValue"
    }

  def pluginsJars(policy: PolicyModel): Seq[String] =
    policy.userPluginsJars.map(userJar => userJar.jarPath.trim)

  def gracefulStop(policy: PolicyModel): Option[Boolean] = policy.stopGracefully

  def executionMode(policy: PolicyModel): String = policy.executionMode match {
    case Some(mode) if mode.nonEmpty => mode
    case _ => DetailConfig.getString(ExecutionMode)
  }
} 
Example 131
Source File: ClusterCheckerService.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.services

import akka.actor.{ActorContext, ActorRef}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success}

class ClusterCheckerService(val curatorFramework: CuratorFramework) extends PolicyStatusUtils {

  def checkPolicyStatus(policy: PolicyModel, launcherActor: ActorRef, akkaContext: ActorContext): Unit = {
    findStatusById(policy.id.get) match {
      case Success(policyStatus) =>
        if (policyStatus.status == Launched || policyStatus.status == Starting || policyStatus.status == Uploaded ||
          policyStatus.status == Stopping || policyStatus.status == NotStarted) {
          val information = s"The checker detects that the policy not start/stop correctly"
          log.error(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Some(information)))
          akkaContext.stop(launcherActor)
        } else {
          val information = s"The checker detects that the policy start/stop correctly"
          log.info(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = NotDefined, statusInfo = Some(information)))
        }
      case Failure(exception) =>
        log.error(s"Error when extract policy status in scheduler task.", exception)
    }
  }
} 
Example 132
Source File: HdfsUtilsTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.utils

import java.io.{FileNotFoundException, InputStream}

import org.apache.hadoop.fs.{FileSystem, _}
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar

import scala.util.{Failure, Try}

@RunWith(classOf[JUnitRunner])
class HdfsUtilsTest extends FlatSpec with ShouldMatchers with MockitoSugar {

  val fileSystem: FileSystem = mock[FileSystem]

  val utils = new HdfsUtils(fileSystem, "stratio")

  "hdfs utils" should "getfiles from a path" in {
    val expected = Array(mock[FileStatus])
    when(fileSystem.listStatus(new Path("myTestPath"))).thenReturn(expected)
    val result = utils.getFiles("myTestPath")
    result should be(expected)
  }

  it should "return single file as inputStream" in {
    val expected: InputStream = mock[FSDataInputStream]
    when(fileSystem.open(new Path("testFile"))).thenReturn(expected.asInstanceOf[FSDataInputStream])
    val result: InputStream = utils.getFile("testFile")
    result should be(expected)
  }

  it should "write" in {
    val result = Try(utils.write("from", "to", true)) match {
      case Failure(ex: Throwable) => ex
    }
    result.isInstanceOf[FileNotFoundException] should be(true)
  }

  it should "write without override" in {
    val result = Try(utils.write("from", "to", false)) match {
      case Failure(ex: Throwable) => ex
    }
    result.isInstanceOf[FileNotFoundException] should be(true)
  }
} 
Example 133
Source File: WriterHelper.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.writer

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.driver.schema.SchemaHelper
import com.stratio.sparta.sdk.pipeline.output.Output
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructType

import scala.util.{Failure, Success, Try}

object WriterHelper extends SLF4JLogging {

  def write(dataFrame: DataFrame,
            writerOptions: WriterOptions,
            extraSaveOptions: Map[String, String],
            outputs: Seq[Output]): DataFrame = {
    val saveOptions = extraSaveOptions ++
      writerOptions.tableName.fold(Map.empty[String, String]) { outputTableName =>
        Map(Output.TableNameKey -> outputTableName)
      } ++
      writerOptions.partitionBy.fold(Map.empty[String, String]) { partition =>
        Map(Output.PartitionByKey -> partition)
      } ++
      writerOptions.primaryKey.fold(Map.empty[String, String]) { key =>
        Map(Output.PrimaryKey -> key)
      }
    val outputTableName = saveOptions.getOrElse(Output.TableNameKey, "undefined")
    val autoCalculatedFieldsDf = DataFrameModifierHelper.applyAutoCalculateFields(dataFrame,
        writerOptions.autoCalculateFields,
        StructType(dataFrame.schema.fields ++ SchemaHelper.getStreamWriterPkFieldsMetadata(writerOptions.primaryKey)))

    writerOptions.outputs.foreach(outputName =>
      outputs.find(output => output.name == outputName) match {
        case Some(outputWriter) => Try {
          outputWriter.save(autoCalculatedFieldsDf, writerOptions.saveMode, saveOptions)
        } match {
          case Success(_) =>
            log.debug(s"Data stored in $outputTableName")
          case Failure(e) =>
            log.error(s"Something goes wrong. Table: $outputTableName")
            log.error(s"Schema. ${autoCalculatedFieldsDf.schema}")
            log.error(s"Head element. ${autoCalculatedFieldsDf.head}")
            log.error(s"Error message : ${e.getMessage}")
        }
        case None => log.error(s"The output added : $outputName not match in the outputs")
      })
    autoCalculatedFieldsDf
  }

} 
Example 134
Source File: TriggerWriterHelper.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.writer

import akka.event.slf4j.SLF4JLogging
import org.apache.spark.sql.{DataFrame, Row}
import com.stratio.sparta.driver.exception.DriverException
import com.stratio.sparta.driver.factory.SparkContextFactory
import com.stratio.sparta.driver.schema.SchemaHelper
import com.stratio.sparta.driver.step.Trigger
import com.stratio.sparta.sdk.pipeline.output.Output
import org.apache.spark.sql.types.StructType
import org.apache.spark.streaming.dstream.DStream

import scala.util.{Failure, Success, Try}

object TriggerWriterHelper extends SLF4JLogging {

  def writeStream(triggers: Seq[Trigger],
                  inputTableName: String,
                  outputs: Seq[Output],
                  streamData: DStream[Row],
                  schema: StructType): Unit = {
    streamData.foreachRDD(rdd => {
      val parsedDataFrame = SparkContextFactory.sparkSessionInstance.createDataFrame(rdd, schema)

      writeTriggers(parsedDataFrame, triggers, inputTableName, outputs)
    })
  }

  //scalastyle:off
  def writeTriggers(dataFrame: DataFrame,
                    triggers: Seq[Trigger],
                    inputTableName: String,
                    outputs: Seq[Output]): Unit = {
    val sparkSession = dataFrame.sparkSession
    if (triggers.nonEmpty && isCorrectTableName(inputTableName)) {
      if (!sparkSession.catalog.tableExists(inputTableName)) {
        dataFrame.createOrReplaceTempView(inputTableName)
        log.debug(s"Registering temporal table in Spark with name: $inputTableName")
      }
      val tempTables = triggers.flatMap(trigger => {
        log.debug(s"Executing query in Spark: ${trigger.sql}")
        val queryDf = Try(sparkSession.sql(trigger.sql)) match {
          case Success(sqlResult) => sqlResult
          case Failure(exception: org.apache.spark.sql.AnalysisException) =>
            log.warn("Warning running analysis in Catalyst in the query ${trigger.sql} in trigger ${trigger.name}",
              exception.message)
            throw DriverException(exception.getMessage, exception)
          case Failure(exception) =>
            log.warn(s"Warning running sql in the query ${trigger.sql} in trigger ${trigger.name}", exception.getMessage)
            throw DriverException(exception.getMessage, exception)
        }
        val extraOptions = Map(Output.TableNameKey -> trigger.name)

        if (!queryDf.rdd.isEmpty()) {
          val autoCalculatedFieldsDf = WriterHelper.write(queryDf, trigger.writerOptions, extraOptions, outputs)
          if (isCorrectTableName(trigger.name) && !sparkSession.catalog.tableExists(trigger.name)) {
            autoCalculatedFieldsDf.createOrReplaceTempView(trigger.name)
            log.debug(s"Registering temporal table in Spark with name: ${trigger.name}")
          }
          else log.warn(s"The trigger ${trigger.name} have incorrect name, is impossible to register as temporal table")

          Option(trigger.name)
        } else None
      })
      tempTables.foreach(tableName =>
        if (isCorrectTableName(tableName) && sparkSession.catalog.tableExists(tableName)) {
          sparkSession.catalog.dropTempView(tableName)
          log.debug(s"Dropping temporal table in Spark with name: $tableName")
        } else log.debug(s"Impossible to drop table in Spark with name: $tableName"))

      if (isCorrectTableName(inputTableName) && sparkSession.catalog.tableExists(inputTableName)) {
        sparkSession.catalog.dropTempView(inputTableName)
        log.debug(s"Dropping temporal table in Spark with name: $inputTableName")
      } else log.debug(s"Impossible to drop table in Spark: $inputTableName")
    } else {
      if (triggers.nonEmpty && !isCorrectTableName(inputTableName))
        log.warn(s"Incorrect table name $inputTableName and the triggers could have errors and not have been " +
          s"executed")
    }
  }

  //scalastyle:on

  private[driver] def isCorrectTableName(tableName: String): Boolean =
    tableName.nonEmpty && tableName != "" &&
      tableName.toLowerCase != "select" &&
      tableName.toLowerCase != "project" &&
      !tableName.contains("-") && !tableName.contains("*") && !tableName.contains("/")
} 
Example 135
Source File: CubeOperations.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.step

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.sdk.pipeline.aggregation.cube.{DimensionValue, DimensionValuesTime, InputFields, TimeConfig}
import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import com.stratio.sparta.sdk.utils.AggregationTime
import org.apache.spark.sql.Row
import org.apache.spark.streaming.dstream.DStream
import org.joda.time.DateTime

import scala.util.{Failure, Success, Try}


// Enclosing declaration elided in this listing; assumed shape, with a constant
// marking rows whose values were updated:
case class CubeOperations(cube: Cube) extends SLF4JLogging {

  private final val UpdatedValues = 1

  def extractDimensionsAggregations(inputStream: DStream[Row]): DStream[(DimensionValuesTime, InputFields)] = {
    inputStream.mapPartitions(rows => rows.flatMap(row => Try {
      val dimensionValues = for {
        dimension <- cube.dimensions
        value = row.get(cube.initSchema.fieldIndex(dimension.field))
        (precision, dimValue) = dimension.dimensionType.precisionValue(dimension.precisionKey, value)
      } yield DimensionValue(dimension, TypeOp.transformValueByTypeOp(precision.typeOp, dimValue))

      cube.expiringDataConfig match {
        case None =>
          (DimensionValuesTime(cube.name, dimensionValues), InputFields(row, UpdatedValues))
        case Some(expiringDataConfig) =>
          val eventTime = extractEventTime(dimensionValues)
          val timeDimension = expiringDataConfig.timeDimension
          (DimensionValuesTime(cube.name, dimensionValues, Option(TimeConfig(eventTime, timeDimension))),
            InputFields(row, UpdatedValues))
      }
    } match {
      case Success(dimensionValuesTime) =>
        Some(dimensionValuesTime)
      case Failure(exception) =>
        val error = s"Failure[Aggregations]: ${row.toString} | ${exception.getLocalizedMessage}"
        log.error(error, exception)
        None
    }), true)
  }

  private[driver] def extractEventTime(dimensionValues: Seq[DimensionValue]) = {

    val timeDimension = cube.expiringDataConfig.get.timeDimension
    val dimensionsDates =
      dimensionValues.filter(dimensionValue => dimensionValue.dimension.name == timeDimension)

    if (dimensionsDates.isEmpty) getDate
    else AggregationTime.getMillisFromSerializable(dimensionsDates.head.value)
  }

  private[driver] def getDate: Long = {
    val checkpointGranularity = cube.expiringDataConfig.get.granularity

    AggregationTime.truncateDate(DateTime.now(), checkpointGranularity)
  }
} 
Example 136
Source File: LocalListenerUtils.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.utils

import com.stratio.sparta.driver.factory.SparkContextFactory._
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.recipes.cache.NodeCache

import scala.util.{Failure, Success, Try}

trait LocalListenerUtils extends PolicyStatusUtils {

  def killLocalContextListener(policy: PolicyModel, name: String): Unit = {
    log.info(s"Listener added to ${policy.name} with id: ${policy.id.get}")
    addListener(policy.id.get, (policyStatus: PolicyStatusModel, nodeCache: NodeCache) => {
      synchronized {
        if (policyStatus.status == Stopping) {
          try {
            log.info("Stopping message received from Zookeeper")
            closeContexts(policy.id.get)
          } finally {
            Try(nodeCache.close()) match {
              case Success(_) =>
                log.info("Node cache closed correctly")
              case Failure(e) =>
                log.error(s"The nodeCache in Zookeeper is not closed correctly", e)
            }
          }
        }
      }
    })
  }

  private[driver] def closeContexts(policyId: String): Unit = {
    val information = "The Context have been stopped correctly in the local listener"
    log.info(information)
    updateStatus(PolicyStatusModel(id = policyId, status = Stopped, statusInfo = Some(information)))
    destroySparkContext()
  }
} 
Example 137
Source File: MarathonDriver.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver

import akka.actor.{ActorSystem, Props}
import com.google.common.io.BaseEncoding
import com.stratio.sparta.driver.actor.MarathonAppActor
import com.stratio.sparta.driver.actor.MarathonAppActor.StartApp
import com.stratio.sparta.driver.exception.DriverException
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AkkaConstant
import com.stratio.sparta.serving.core.curator.CuratorFactoryHolder
import com.stratio.sparta.serving.core.utils.PluginsFilesUtils
import com.typesafe.config.ConfigFactory

import scala.util.{Failure, Success, Try}

object MarathonDriver extends PluginsFilesUtils {

  val NumberOfArguments = 3
  val PolicyIdIndex = 0
  val ZookeeperConfigurationIndex = 1
  val DetailConfigurationIndex = 2

  def main(args: Array[String]): Unit = {
    assert(args.length == NumberOfArguments,
      s"Invalid number of arguments: ${args.length}, args: ${args.mkString(",")}, expected: $NumberOfArguments")
    Try {
      val policyId = args(PolicyIdIndex)
      val zookeeperConf = new String(BaseEncoding.base64().decode(args(ZookeeperConfigurationIndex)))
      val detailConf = new String(BaseEncoding.base64().decode(args(DetailConfigurationIndex)))
      initSpartaConfig(zookeeperConf, detailConf)
      val curatorInstance = CuratorFactoryHolder.getInstance()
      val system = ActorSystem(policyId)
      val marathonAppActor =
        system.actorOf(Props(new MarathonAppActor(curatorInstance)), AkkaConstant.MarathonAppActorName)

      marathonAppActor ! StartApp(policyId)
    } match {
      case Success(_) =>
        log.info("Initiated Marathon App environment")
      case Failure(driverException: DriverException) =>
        log.error(driverException.msg, driverException.getCause)
        throw driverException
      case Failure(exception) =>
        log.error(s"Error initiating Marathon App environment: ${exception.getLocalizedMessage}", exception)
        throw exception
    }
  }

  def initSpartaConfig(zKConfig: String, detailConf: String): Unit = {
    val configStr = s"${detailConf.stripPrefix("{").stripSuffix("}")}" +
      s"\n${zKConfig.stripPrefix("{").stripSuffix("}")}"
    log.info(s"Parsed config: sparta { $configStr }")
    val composedStr = s" sparta { $configStr } "
    SpartaConfig.initMainWithFallbackConfig(ConfigFactory.parseString(composedStr))
  }
} 
Example 138
Source File: ParserStage.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.stage

import java.io.Serializable

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.driver.writer.{TransformationsWriterHelper, WriterOptions}
import com.stratio.sparta.sdk.pipeline.output.Output
import com.stratio.sparta.sdk.pipeline.transformation.Parser
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, TransformationModel}
import com.stratio.sparta.serving.core.utils.ReflectionUtils
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.spark.streaming.dstream.DStream

import scala.util.{Failure, Success, Try}

trait ParserStage extends BaseStage {
  this: ErrorPersistor =>

  def parserStage(refUtils: ReflectionUtils,
                  schemas: Map[String, StructType]): (Seq[Parser], Option[WriterOptions]) =
    (policy.transformations.get.transformationsPipe.map(parser => createParser(parser, refUtils, schemas)),
      policy.transformations.get.writer.map(writer => WriterOptions(
        writer.outputs,
        writer.saveMode,
        writer.tableName,
        getAutoCalculatedFields(writer.autoCalculatedFields),
        writer.partitionBy,
        writer.primaryKey
      )))

  private[driver] def createParser(model: TransformationModel,
                           refUtils: ReflectionUtils,
                           schemas: Map[String, StructType]): Parser = {
    val classType = model.configuration.getOrElse(AppConstant.CustomTypeKey, model.`type`).toString
    val errorMessage = s"Something gone wrong creating the parser: $classType. Please re-check the policy."
    val okMessage = s"Parser: $classType created correctly."
    generalTransformation(PhaseEnum.Parser, okMessage, errorMessage) {
      val outputFieldsNames = model.outputFieldsTransformed.map(_.name)
      val schema = schemas.getOrElse(model.order.toString, throw new Exception("Can not find transformation schema"))
      refUtils.tryToInstantiate[Parser](classType + Parser.ClassSuffix, (c) =>
        c.getDeclaredConstructor(
          classOf[Integer],
          classOf[Option[String]],
          classOf[Seq[String]],
          classOf[StructType],
          classOf[Map[String, Serializable]])
          .newInstance(model.order, model.inputField, outputFieldsNames, schema, model.configuration)
          .asInstanceOf[Parser])
    }
  }
}

object ParserStage extends SLF4JLogging {

  def executeParsers(row: Row, parsers: Seq[Parser]): Seq[Row] =
    if (parsers.size == 1) parseEvent(row, parsers.head)
    else parseEvent(row, parsers.head).flatMap(eventParsed => executeParsers(eventParsed, parsers.drop(1)))

  def parseEvent(row: Row, parser: Parser): Seq[Row] =
    Try {
      parser.parse(row)
    } match {
      case Success(eventParsed) =>
        eventParsed
      case Failure(exception) =>
        val error = s"Failure[Parser]: ${row.mkString(",")} | Message: ${exception.getLocalizedMessage}" +
          s" | Parser: ${parser.getClass.getSimpleName}"
        log.error(error, exception)
        Seq.empty[Row]
    }

  def applyParsers(input: DStream[Row],
                   parsers: Seq[Parser],
                   schema: StructType,
                   outputs: Seq[Output],
                   writerOptions: Option[WriterOptions]): DStream[Row] = {
    val transformedData = if (parsers.isEmpty) input
    else input.flatMap(row => executeParsers(row, parsers))

    writerOptions.foreach(options =>
      TransformationsWriterHelper.writeTransformations(transformedData, schema, outputs, options))
    transformedData
  }
} 
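
executeParsers chains the parsers recursively: every row emitted by one parser is fed into the next, and an empty result short-circuits the rest of the pipe. Here is a self-contained sketch of the same chaining idea, with plain functions standing in for Parser instances (all names are illustrative):

object ChainingSketch extends App {
  type Parse = Int => Seq[Int] // stand-in for Row => Seq[Row]

  def executeAll(value: Int, parsers: Seq[Parse]): Seq[Int] =
    if (parsers.size == 1) parsers.head(value)
    else parsers.head(value).flatMap(v => executeAll(v, parsers.drop(1)))

  val double: Parse = v => Seq(v * 2)
  val evenOnly: Parse = v => if (v % 2 == 0) Seq(v) else Seq.empty

  println(executeAll(3, Seq(double, evenOnly))) // List(6)
}
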
Example 139
Source File: BaseStage.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.stage

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.driver.utils.StageUtils
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum.NotDefined
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}


trait ErrorPersistor {
  def persistError(error: PolicyErrorModel): Unit
}

trait ZooKeeperError extends ErrorPersistor with PolicyStatusUtils {

  val curatorFramework: CuratorFramework

  def policy: PolicyModel

  def persistError(error: PolicyErrorModel): Unit =
    updateStatus(PolicyStatusModel(policy.id.get, NotDefined, None, None, lastError = Some(error)))

  def clearError(): Unit =
    clearLastError(policy.id.get)
}

trait LogError extends ErrorPersistor with SLF4JLogging {
  def persistError(error: PolicyErrorModel): Unit = log.error(s"This error was not saved to ZK: $error")
}

trait BaseStage extends SLF4JLogging with StageUtils {
  this: ErrorPersistor =>
  def policy: PolicyModel

  def generalTransformation[T](code: PhaseEnum.Value, okMessage: String, errorMessage: String)
                              (f: => T): T = {
    Try(f) match {
      case Success(result) =>
        log.info(okMessage)
        result
      case Failure(ex) => throw logAndCreateEx(code, ex, policy, errorMessage)
    }
  }

  def logAndCreateEx(code: PhaseEnum.Value,
                      ex: Throwable,
                      policy: PolicyModel,
                      message: String
                    ): IllegalArgumentException = {
    val originalMsg = ex.getCause match {
      case _: ClassNotFoundException => "The component couldn't be found in classpath. Please check the type."
      case exception: Throwable => exception.toString
      case _ => ex.toString // getCause may be null; typed patterns never match null, so fall back to the outer exception
    }
    val policyError = PolicyErrorModel(message, code, originalMsg)
    log.error("An error was detected: {}", policyError)
    Try {
      persistError(policyError)
    } recover {
      case e => log.error(s"Error while persisting error: $policyError", e)
    }
    new IllegalArgumentException(message, ex)
  }

} 
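
generalTransformation is a thin Try wrapper: it logs on success and converts any failure into an IllegalArgumentException via logAndCreateEx. A stripped-down sketch of the same pattern, without the policy and persistence wiring:

import scala.util.{Failure, Success, Try}

object TransformationSketch extends App {
  def generalTransformation[T](okMessage: String, errorMessage: String)(f: => T): T =
    Try(f) match {
      case Success(result) =>
        println(okMessage)
        result
      case Failure(ex) =>
        throw new IllegalArgumentException(errorMessage, ex)
    }

  println(generalTransformation("parsed ok", "parse failed")("42".toInt)) // prints "parsed ok", then 42
}
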
Example 140
Source File: Parser.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.sdk.pipeline.transformation

import java.io.{Serializable => JSerializable}

import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import com.stratio.sparta.sdk.properties.{CustomProperties, Parameterizable}
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructField, StructType}

import scala.util.{Failure, Success, Try}

abstract class Parser(order: Integer,
                      inputField: Option[String],
                      outputFields: Seq[String],
                      schema: StructType,
                      properties: Map[String, JSerializable])
  extends Parameterizable(properties) with Ordered[Parser] with CustomProperties {

  val customKey = "transformationOptions"
  val customPropertyKey = "transformationOptionsKey"
  val customPropertyValue = "transformationOptionsValue"
  val propertiesWithCustom = properties ++ getCustomProperties

  val outputFieldsSchema = schema.fields.filter(field => outputFields.contains(field.name))

  val inputFieldRemoved = Try(propertiesWithCustom.getBoolean("removeInputField")).getOrElse(false)

  val inputFieldIndex = inputField match {
    case Some(field) => Try(schema.fieldIndex(field)).getOrElse(0)
    case None => 0
  }

  val whenErrorDo = Try(WhenError.withName(propertiesWithCustom.getString("whenError")))
    .getOrElse(WhenError.Error)

  def parse(data: Row): Seq[Row]

  def getOrder: Integer = order

  def checkFields(keyMap: Map[String, JSerializable]): Map[String, JSerializable] =
    keyMap.flatMap(key => if (outputFields.contains(key._1)) Some(key) else None)

  def compare(that: Parser): Int = this.getOrder.compareTo(that.getOrder)

  //scalastyle:off
  def returnWhenError(exception: Exception): Null =
    whenErrorDo match {
      case WhenError.Null => null
      case _ => throw exception
    }

  //scalastyle:on

  def parseToOutputType(outSchema: StructField, inputValue: Any): Any =
    Try(TypeOp.transformValueByTypeOp(outSchema.dataType, inputValue.asInstanceOf[Any]))
      .getOrElse(returnWhenError(new IllegalStateException(
        s"Error parsing to output type the value: ${inputValue.toString}")))

  def returnData(newData: Try[Seq[_]], prevData: Seq[_]): Seq[Row] =
    newData match {
      case Success(data) => Seq(Row.fromSeq(prevData ++ data))
      case Failure(e) => whenErrorDo match {
        case WhenError.Discard => Seq.empty[Row]
        case _ => throw e
      }
    }

  def returnData(newData: Try[Row], prevData: Row): Seq[Row] =
    newData match {
      case Success(data) => Seq(Row.merge(prevData, data))
      case Failure(e) => whenErrorDo match {
        case WhenError.Discard => Seq.empty[Row]
        case _ => throw e
      }
    }

  def removeIndex(row: Seq[_], inputFieldIndex: Int): Seq[_] =
    if (row.size < inputFieldIndex) row
    else row.take(inputFieldIndex) ++ row.drop(inputFieldIndex + 1)

  def removeInputField(row: Row): Seq[_] = {
    if (inputFieldRemoved && inputField.isDefined)
      removeIndex(row.toSeq, inputFieldIndex)
    else
      row.toSeq
  }


}

object Parser {

  final val ClassSuffix = "Parser"
  final val DefaultOutputType = "string"
  final val TypesFromParserClass = Map("datetime" -> "timestamp")
} 
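
To show what implementing parse looks like against this base class, here is a hypothetical minimal subclass (a sketch only, assuming the Sparta SDK and Spark are on the classpath). The class name follows the ClassSuffix convention, so a transformation of type "UpperCase" would resolve to it through the reflection wiring shown in ParserStage:

import java.io.{Serializable => JSerializable}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import scala.util.Try

class UpperCaseParser(order: Integer,
                      inputField: Option[String],
                      outputFields: Seq[String],
                      schema: StructType,
                      properties: Map[String, JSerializable])
  extends Parser(order, inputField, outputFields, schema, properties) {

  // Uppercase the configured input field and append it as the output column;
  // returnData applies the whenError policy if the Try fails
  def parse(row: Row): Seq[Row] = {
    val newData = Try(Seq(row.get(inputFieldIndex).toString.toUpperCase))
    returnData(newData, removeInputField(row))
  }
}
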
Example 141
Source File: MetadataActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import java.util.regex.Pattern

import akka.actor.Actor
import com.github.nscala_time.time.Imports.{DateTime, DateTimeFormat}
import com.stratio.sparta.serving.api.actor.MetadataActor._
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.api.utils.{BackupRestoreUtils, FileActorUtils}
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.exception.ServingCoreException
import com.stratio.sparta.serving.core.helpers.InfoHelper
import com.stratio.sparta.serving.core.models.SpartaSerializer
import com.stratio.sparta.serving.core.models.files.{BackupRequest, SpartaFilesResponse}
import spray.http.BodyPart
import spray.httpx.Json4sJacksonSupport

import scala.util.{Failure, Success, Try}

class MetadataActor extends Actor with Json4sJacksonSupport with BackupRestoreUtils with SpartaSerializer
  with FileActorUtils {

  //The dir where the backups will be saved
  val targetDir = Try(SpartaConfig.getDetailConfig.get.getString(BackupsLocation)).getOrElse(DefaultBackupsLocation)
  override val apiPath = HttpConstant.MetadataPath
  override val patternFileName = Option(Pattern.compile(""".*\.json""").asPredicate())

  //The ZooKeeper connection settings used by the backup and restore operations
  val zkConfig = Try(SpartaConfig.getZookeeperConfig.get)
    .getOrElse(throw new ServingCoreException("Zookeeper configuration is mandatory"))
  override val uri = Try(zkConfig.getString("connectionString")).getOrElse(DefaultZKConnection)
  override val connectionTimeout = Try(zkConfig.getInt("connectionTimeout")).getOrElse(DefaultZKConnectionTimeout)
  override val sessionTimeout = Try(zkConfig.getInt("sessionTimeout")).getOrElse(DefaultZKSessionTimeout)

  override def receive: Receive = {
    case UploadBackups(files) => if (files.isEmpty) errorResponse() else uploadBackups(files)
    case ListBackups => browseBackups()
    case BuildBackup => buildBackup()
    case DeleteBackups => deleteBackups()
    case CleanMetadata => cleanMetadata()
    case DeleteBackup(fileName) => deleteBackup(fileName)
    case ExecuteBackup(backupRequest) => executeBackup(backupRequest)
    case _ => log.info("Unrecognized message in Backup/Restore Actor")
  }

  def executeBackup(backupRequest: BackupRequest): Unit =
    sender ! BackupResponse(Try {
      importer("/", s"$targetDir/${backupRequest.fileName}", backupRequest.deleteAllBefore)
    })

  def errorResponse(): Unit =
    sender ! SpartaFilesResponse(Failure(new IllegalArgumentException(s"At least one file is expected")))

  def deleteBackups(): Unit = sender ! BackupResponse(deleteFiles())

  def cleanMetadata(): Unit = sender ! BackupResponse(Try(cleanZk(BaseZKPath)))

  def buildBackup(): Unit = {
    val format = DateTimeFormat.forPattern("yyyy-MM-dd-hh:mm:ss")
    val appInfo = InfoHelper.getAppInfo
    Try {
      dump(BaseZKPath, s"$targetDir/backup-${format.print(DateTime.now)}-${appInfo.pomVersion}.json")
    } match {
      case Success(_) =>
        sender ! SpartaFilesResponse(browseDirectory())
      case Failure(e) =>
        sender ! BackupResponse(Failure(e))
    }
  }

  def deleteBackup(fileName: String): Unit = sender ! BackupResponse(deleteFile(fileName))

  def browseBackups(): Unit = sender ! SpartaFilesResponse(browseDirectory())

  def uploadBackups(files: Seq[BodyPart]): Unit = sender ! SpartaFilesResponse(uploadFiles(files))
}

object MetadataActor {

  case class UploadBackups(files: Seq[BodyPart])

  case class BackupResponse(status: Try[_])

  case class ExecuteBackup(backupRequest: BackupRequest)

  case object ListBackups

  case object BuildBackup

  case object DeleteBackups

  case object CleanMetadata

  case class DeleteBackup(fileName: String)

} 
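
A hypothetical interaction with this actor via the ask pattern, assuming Sparta's ZooKeeper and detail configuration have already been initialized (the actor's constructor reads both); the system and actor names are illustrative:

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import com.stratio.sparta.serving.api.actor.MetadataActor._

import scala.concurrent.duration._

object MetadataActorSketch extends App {
  implicit val timeout: Timeout = Timeout(15.seconds)
  val system = ActorSystem("metadata-demo")
  val metadata = system.actorOf(Props(new MetadataActor()), "metadata")

  val backups = metadata ? ListBackups // replies with SpartaFilesResponse
  val build = metadata ? BuildBackup   // dumps the ZooKeeper tree to a timestamped backup file
}
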
Example 142
Source File: MarathonLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, Cancellable, PoisonPill}
import com.stratio.sparta.serving.core.marathon.MarathonService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.services.ClusterCheckerService
import com.stratio.sparta.serving.core.utils._
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}

class MarathonLauncherActor(val curatorFramework: CuratorFramework) extends Actor
  with LauncherUtils with SchedulerUtils with SparkSubmitUtils with ClusterListenerUtils with ArgumentsUtils
  with PolicyStatusUtils with RequestUtils {

  private val clusterCheckerService = new ClusterCheckerService(curatorFramework)
  private val checkersPolicyStatus = scala.collection.mutable.ArrayBuffer.empty[Cancellable]

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => initializeSubmitRequest(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Marathon Launcher Actor")
  }

  override def postStop(): Unit = checkersPolicyStatus.foreach(_.cancel())

  def initializeSubmitRequest(policy: PolicyModel): Unit = {
    Try {
      log.info(s"Initializing options for submit Marathon application associated to policy: ${policy.name}")
      val zookeeperConfig = getZookeeperConfig
      val clusterConfig = SpartaConfig.getClusterConfig(Option(ConfigMesos)).get
      val master = clusterConfig.getString(Master).trim
      val driverFile = extractMarathonDriverSubmit(policy, DetailConfig, SpartaConfig.getHdfsConfig)
      val pluginsFiles = pluginsJars(policy)
      val driverArguments =
        extractDriverArguments(policy, driverFile, clusterConfig, zookeeperConfig, ConfigMesos, pluginsFiles)
      val (sparkSubmitArguments, sparkConfigurations) =
        extractSubmitArgumentsAndSparkConf(policy, clusterConfig, pluginsFiles)
      val submitRequest = SubmitRequest(policy.id.get, SpartaDriverClass, driverFile, master, sparkSubmitArguments,
        sparkConfigurations, driverArguments, ConfigMesos, killUrl(clusterConfig))
      val detailExecMode = getDetailExecutionMode(policy, clusterConfig)

      createRequest(submitRequest).getOrElse(throw new Exception("Impossible to create submit request in persistence"))

      (new MarathonService(context, curatorFramework, policy, submitRequest), detailExecMode)
    } match {
      case Failure(exception) =>
        val information = s"Error when initializing Sparta Marathon App options"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        self ! PoisonPill
      case Success((marathonApp, detailExecMode)) =>
        val information = "Sparta Marathon App configurations initialized correctly"
        log.info(information)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = NotStarted,
          statusInfo = Option(information), lastExecutionMode = Option(detailExecMode)))
        marathonApp.launch(detailExecMode)
        addMarathonContextListener(policy.id.get, policy.name, context, Option(self))
        checkersPolicyStatus += scheduleOneTask(AwaitPolicyChangeStatus, DefaultAwaitPolicyChangeStatus)(
          clusterCheckerService.checkPolicyStatus(policy, self, context))
    }
  }
} 
Example 143
Source File: LocalLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, PoisonPill}
import com.stratio.sparta.driver.factory.SparkContextFactory
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.helpers.{JarsHelper, PolicyHelper, ResourceManagerLinkHelper}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.{LauncherUtils, PolicyConfigUtils, PolicyStatusUtils}
import org.apache.curator.framework.CuratorFramework
import org.apache.spark.streaming.StreamingContext

import scala.util.{Failure, Success, Try}

class LocalLauncherActor(streamingContextService: StreamingContextService, val curatorFramework: CuratorFramework)
  extends Actor with PolicyConfigUtils with LauncherUtils with PolicyStatusUtils{

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => doInitSpartaContext(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Local Launcher Actor")
  }

  private def doInitSpartaContext(policy: PolicyModel): Unit = {
    val jars = PolicyHelper.jarsFromPolicy(policy)

    jars.foreach(file => JarsHelper.addToClasspath(file))
    Try {
      val startingInfo = s"Starting Sparta local job for policy"
      log.info(startingInfo)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.NotStarted,
        statusInfo = Some(startingInfo),
        lastExecutionMode = Option(AppConstant.LocalValue)
      ))
      val (spartaWorkflow, ssc) = streamingContextService.localStreamingContext(policy, jars)
      spartaWorkflow.setup()
      ssc.start()
      val startedInformation = s"The Sparta local job was started correctly"
      log.info(startedInformation)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.Started,
        statusInfo = Some(startedInformation),
        resourceManagerUrl = ResourceManagerLinkHelper.getLink(executionMode(policy), policy.monitoringLink)
      ))
      ssc.awaitTermination()
      spartaWorkflow.cleanUp()
    } match {
      case Success(_) =>
        val information = s"Stopped correctly Sparta local job"
        log.info(information)
        updateStatus(PolicyStatusModel(
          id = policy.id.get, status = PolicyStatusEnum.Stopped, statusInfo = Some(information)))
        self ! PoisonPill
      case Failure(exception) =>
        val information = s"Error initiating Sparta local job"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(
          id = policy.id.get,
          status = PolicyStatusEnum.Failed,
          statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        SparkContextFactory.destroySparkContext()
        self ! PoisonPill
    }
  }
} 
Example 144
Source File: FileActorUtils.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.utils

import java.io.{BufferedOutputStream, File, FileOutputStream}
import java.net.InetAddress
import java.text.DecimalFormat
import java.util.function.Predicate

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.models.files.SpartaFile
import spray.http.BodyPart

import scala.util.{Failure, Success, Try}

trait FileActorUtils extends SLF4JLogging {

  //The dir where the files will be saved
  val targetDir: String
  val apiPath: String

  //Regexp for name validation
  val patternFileName: Option[Predicate[String]] = None

  def deleteFiles(): Try[_] =
    Try {
      val directory = new File(targetDir)
      if (directory.exists && directory.isDirectory)
        directory.listFiles.filter(_.isFile).toList.foreach { file =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(file.getName)))
            file.delete()
        }
    }

  def deleteFile(fileName: String): Try[_] =
    Try {
      val plugin = new File(s"$targetDir/$fileName")
      if (plugin.exists && !plugin.isDirectory)
        plugin.delete()
    }

  def browseDirectory(): Try[Seq[SpartaFile]] =
    Try {
      val directory = new File(targetDir)
      if (directory.exists && directory.isDirectory) {
        directory.listFiles.filter(_.isFile).toList.flatMap { file =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(file.getName)))
            Option(SpartaFile(file.getName, s"$url/${file.getName}", file.getAbsolutePath,
              sizeToMbFormat(file.length())))
          else None
        }
      } else Seq.empty[SpartaFile]
    }

  def uploadFiles(files: Seq[BodyPart]): Try[Seq[SpartaFile]] =
    Try {
      files.flatMap { file =>
        val fileNameOption = file.filename.orElse(file.name.orElse {
          log.warn(s"Is necessary one file name to upload files")
          None
        })
        fileNameOption.flatMap { fileName =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(fileName))) {
            val localMachineDir = s"$targetDir/$fileName"

            Try(saveFile(file.entity.data.toByteArray, localMachineDir)) match {
              case Success(newFile) =>
                Option(SpartaFile(fileName, s"$url/$fileName", localMachineDir, sizeToMbFormat(newFile.length())))
              case Failure(e) =>
                log.error(s"Error saving file in path $localMachineDir", e)
                None
            }
          } else {
            log.warn(s"$fileName is Not a valid file name")
            None
          }
        }
      }
    }

  private def sizeToMbFormat(size: Long): String = {
    val formatter = new DecimalFormat("####.##")
    s"${formatter.format(size.toDouble / (1024 * 1024))} MB"
  }

  private def saveFile(array: Array[Byte], fileName: String): File = {
    log.info(s"Saving file to: $fileName")
    new File(fileName).getParentFile.mkdirs
    val bos = new BufferedOutputStream(new FileOutputStream(fileName))
    bos.write(array)
    bos.close()
    new File(fileName)
  }

  private def url: String = {
    val host = Try(InetAddress.getLocalHost.getHostName).getOrElse(SpartaConfig.apiConfig.get.getString("host"))
    val port = SpartaConfig.apiConfig.get.getInt("port")

    s"http://$host:$port/${HttpConstant.SpartaRootPath}/$apiPath"
  }
} 
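
A hypothetical concrete implementation of the trait, restricting listings and uploads to .jar files; the directory and API path values are illustrative, and browsing assumes Sparta's api config is initialized, since it is used to build the download URL:

import java.util.regex.Pattern

object JarFileUtils extends FileActorUtils {
  override val targetDir = "/tmp/sparta/jars" // assumed location
  override val apiPath = "jars"               // assumed API path segment
  override val patternFileName = Option(Pattern.compile(""".*\.jar""").asPredicate())
}

// JarFileUtils.browseDirectory() now returns only the .jar files found under targetDir
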
Example 145
Source File: SSLSupport.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparkta.serving.api.ssl

import java.io.FileInputStream
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import com.stratio.sparta.serving.api.helpers.SpartaHelper.log
import com.stratio.sparta.serving.core.config.SpartaConfig
import spray.io._

import scala.util.{Failure, Success, Try}

trait SSLSupport {

  implicit def sslContext: SSLContext = {
    val context = SSLContext.getInstance("TLS")
    if (isHttpsEnabled) {
      val keyStoreResource = SpartaConfig.apiConfig.get.getString("certificate-file")
      val password = SpartaConfig.apiConfig.get.getString("certificate-password")

      val keyStore = KeyStore.getInstance("jks")
      keyStore.load(new FileInputStream(keyStoreResource), password.toCharArray)
      val keyManagerFactory = KeyManagerFactory.getInstance("SunX509")
      keyManagerFactory.init(keyStore, password.toCharArray)
      val trustManagerFactory = TrustManagerFactory.getInstance("SunX509")
      trustManagerFactory.init(keyStore)
      context.init(keyManagerFactory.getKeyManagers, trustManagerFactory.getTrustManagers, new SecureRandom)
    }
    context
  }

  implicit def sslEngineProvider: ServerSSLEngineProvider = {
    ServerSSLEngineProvider { engine =>
      engine.setEnabledCipherSuites(Array(
        "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
        "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_RSA_WITH_AES_256_CBC_SHA256",
        "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384",
        "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
        "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA",
        "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"))
      engine.setEnabledProtocols(Array( "TLSv1.2" ))
      engine
    }
  }

  def isHttpsEnabled: Boolean =
    SpartaConfig.getSprayConfig match {
      case Some(config) =>
        Try(config.getValue("ssl-encryption")) match {
          case Success(value) =>
            "on".equals(value.unwrapped())
          case Failure(e) =>
            log.error("Incorrect value in ssl-encryption option, setting https disabled", e)
            false
        }
      case None =>
        log.warn("Impossible to get spray config, setting https disabled")
        false
    }
} 
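
isHttpsEnabled accepts only the literal value on. A minimal sketch of that check against an inline config (in the real flow the value comes from the spray section of Sparta's configuration):

import com.typesafe.config.ConfigFactory

object SslFlagSketch extends App {
  val sprayConfig = ConfigFactory.parseString("ssl-encryption = on")
  println("on".equals(sprayConfig.getValue("ssl-encryption").unwrapped())) // true
}
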
Example 146
Source File: DriverActorTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import java.nio.file.{Files, Path}

import akka.actor.{ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit}
import akka.util.Timeout
import com.stratio.sparta.serving.api.actor.DriverActor.UploadDrivers
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.SpartaSerializer
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import com.typesafe.config.{Config, ConfigFactory}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import spray.http.BodyPart

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class DriverActorTest extends TestKit(ActorSystem("PluginActorSpec"))
  with DefaultTimeout
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfterEach
  with MockitoSugar with SpartaSerializer {

  val tempDir: Path = Files.createTempDirectory("test")
  tempDir.toFile.deleteOnExit()

  val localConfig: Config = ConfigFactory.parseString(
    s"""
       |sparta{
       |   api {
       |     host = local
       |     port= 7777
       |   }
       |}
       |
       |sparta.config.driverPackageLocation = "$tempDir"
    """.stripMargin)

  val fileList = Seq(BodyPart("reference.conf", "file"))

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
    SpartaConfig.initApiConfig()
  }

  override def afterAll: Unit = {
    shutdown()
  }

  override implicit val timeout: Timeout = Timeout(15 seconds)

  "DriverActor " must {

    "Not save files with wrong extension" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(fileList)
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.isEmpty shouldBe true
      }
    }
    "Not upload empty files" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(Seq.empty)
      expectMsgPF() {
        case SpartaFilesResponse(Failure(f)) => f.getMessage shouldBe "At least one file is expected"
      }
    }
    "Save a file" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(Seq(BodyPart("reference.conf", "file.jar")))
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.head.fileName.endsWith("file.jar") shouldBe true
      }
    }
  }
} 
Example 147
Source File: PluginActorTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import java.nio.file.{Files, Path}

import akka.actor.{ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit}
import akka.util.Timeout
import com.stratio.sparta.serving.api.actor.PluginActor.{PluginResponse, UploadPlugins}
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.SpartaSerializer
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import com.typesafe.config.{Config, ConfigFactory}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import spray.http.BodyPart

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class PluginActorTest extends TestKit(ActorSystem("PluginActorSpec"))
  with DefaultTimeout
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfterEach
  with MockitoSugar with SpartaSerializer {

  val tempDir: Path = Files.createTempDirectory("test")
  tempDir.toFile.deleteOnExit()

  val localConfig: Config = ConfigFactory.parseString(
    s"""
       |sparta{
       |   api {
       |     host = local
       |     port= 7777
       |   }
       |}
       |
       |sparta.config.pluginPackageLocation = "$tempDir"
    """.stripMargin)


  val fileList = Seq(BodyPart("reference.conf", "file"))

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
    SpartaConfig.initApiConfig()
  }

  override def afterAll: Unit = {
    shutdown()
  }

  override implicit val timeout: Timeout = Timeout(15 seconds)

  "PluginActor " must {

    "Not save files with wrong extension" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(fileList)
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.isEmpty shouldBe true
      }
    }
    "Not upload empty files" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(Seq.empty)
      expectMsgPF() {
        case SpartaFilesResponse(Failure(f)) => f.getMessage shouldBe "At least one file is expected"
      }
    }
    "Save a file" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(Seq(BodyPart("reference.conf", "file.jar")))
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.head.fileName.endsWith("file.jar") shouldBe true
      }
    }
  }

} 
Example 148
Source File: PluginsHttpServiceTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import akka.actor.ActorRef
import akka.testkit.TestProbe
import com.stratio.sparta.serving.api.actor.PluginActor.{PluginResponse, UploadPlugins}
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.dto.LoggedUserConstant
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import spray.http._

import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class PluginsHttpServiceTest extends WordSpec
  with PluginsHttpService
  with HttpServiceBaseTest {

  override val supervisor: ActorRef = testProbe.ref

  val pluginTestProbe = TestProbe()

  val dummyUser = Some(LoggedUserConstant.AnonymousUser)

  override implicit val actors: Map[String, ActorRef] = Map.empty

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
  }

  "PluginsHttpService.upload" should {
    "Upload a file" in {
      val response = SpartaFilesResponse(Success(Seq(SpartaFile("", "", "", ""))))
      startAutopilot(response)
      Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
        testProbe.expectMsgType[UploadPlugins]
        status should be(StatusCodes.OK)
      }
    }
    "Fail when service is not available" in {
      val response = SpartaFilesResponse(Failure(new IllegalArgumentException("Error")))
      startAutopilot(response)
      Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
        testProbe.expectMsgType[UploadPlugins]
        status should be(StatusCodes.InternalServerError)
      }
    }
  }
} 
Example 149
Source File: JdbcOutput.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.output.jdbc

import java.io.{Serializable => JSerializable}
import java.util.Properties

import com.stratio.sparta.sdk.pipeline.output.Output._
import com.stratio.sparta.sdk.pipeline.output.SaveModeEnum.SpartaSaveMode
import com.stratio.sparta.sdk.pipeline.output.{Output, SaveModeEnum}
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql._
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.jdbc.SpartaJdbcUtils
import org.apache.spark.sql.jdbc.SpartaJdbcUtils._

import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}

class JdbcOutput(name: String, properties: Map[String, JSerializable]) extends Output(name, properties) {

  require(properties.getString("url", None).isDefined, "url must be provided")

  val url = properties.getString("url")

  override def supportedSaveModes : Seq[SpartaSaveMode] =
    Seq(SaveModeEnum.Append, SaveModeEnum.ErrorIfExists, SaveModeEnum.Ignore, SaveModeEnum.Overwrite)

  //scalastyle:off
  override def save(dataFrame: DataFrame, saveMode: SpartaSaveMode, options: Map[String, String]): Unit = {
    validateSaveMode(saveMode)
    val tableName = getTableNameFromOptions(options)
    val sparkSaveMode = getSparkSaveMode(saveMode)
    val connectionProperties = new JDBCOptions(url,
      tableName,
      propertiesWithCustom.mapValues(_.toString).filter(_._2.nonEmpty)
    )

    Try {
      if (sparkSaveMode == SaveMode.Overwrite) SpartaJdbcUtils.dropTable(url, connectionProperties, tableName)

      SpartaJdbcUtils.tableExists(url, connectionProperties, tableName, dataFrame.schema)
    } match {
      case Success(tableExists) =>
        if (tableExists) {
          if (saveMode == SaveModeEnum.Upsert) {
            val updateFields = getPrimaryKeyOptions(options) match {
              case Some(pk) => pk.split(",").toSeq
              case None => dataFrame.schema.fields.filter(stField =>
                stField.metadata.contains(Output.PrimaryKeyMetadataKey)).map(_.name).toSeq
            }
            SpartaJdbcUtils.upsertTable(dataFrame, url, tableName, connectionProperties, updateFields)
          }

          if (saveMode == SaveModeEnum.Ignore) return

          if (saveMode == SaveModeEnum.ErrorIfExists) sys.error(s"Table $tableName already exists.")

          if (saveMode == SaveModeEnum.Append || saveMode == SaveModeEnum.Overwrite)
            SpartaJdbcUtils.saveTable(dataFrame, url, tableName, connectionProperties)
        } else log.warn(s"Table not created in Postgres: $tableName")
      case Failure(e) =>
        closeConnection()
        log.error(s"Error creating/dropping table $tableName")
    }
  }

  override def cleanUp(options: Map[String, String]): Unit = {
    log.info(s"Closing connections in JDBC Output: $name")
    closeConnection()
  }
} 
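
A hypothetical instantiation of this output; url is the only required property, the JDBC url here is illustrative, and the "tableName" option key is an assumption made for the commented save call:

import java.io.{Serializable => JSerializable}

object JdbcOutputSketch extends App {
  val props: Map[String, JSerializable] = Map("url" -> "jdbc:h2:mem:sparta")
  val output = new JdbcOutput("jdbcOutput", props)
  // output.save(dataFrame, SaveModeEnum.Append, Map("tableName" -> "events"))
}
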
Example 150
Source File: PostgresOutput.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.output.postgres

import java.io.{InputStream, Serializable => JSerializable}
import java.util.Properties

import com.stratio.sparta.sdk.pipeline.output.Output._
import com.stratio.sparta.sdk.pipeline.output.SaveModeEnum.SpartaSaveMode
import com.stratio.sparta.sdk.pipeline.output.{Output, SaveModeEnum}
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql._
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.jdbc.SpartaJdbcUtils
import org.apache.spark.sql.jdbc.SpartaJdbcUtils._
import org.postgresql.copy.CopyManager
import org.postgresql.core.BaseConnection

import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}

class PostgresOutput(name: String, properties: Map[String, JSerializable]) extends Output(name, properties) {

  require(properties.getString("url", None).isDefined, "Postgres url must be provided")

  val url = properties.getString("url")
  val bufferSize = properties.getString("bufferSize", "65536").toInt
  val delimiter = properties.getString("delimiter", "\t")
  val newLineSubstitution = properties.getString("newLineSubstitution", " ")
  val encoding = properties.getString("encoding", "UTF8")

  override def supportedSaveModes: Seq[SpartaSaveMode] =
    Seq(SaveModeEnum.Append, SaveModeEnum.Overwrite, SaveModeEnum.Upsert)

  override def save(dataFrame: DataFrame, saveMode: SpartaSaveMode, options: Map[String, String]): Unit = {
    validateSaveMode(saveMode)
    val tableName = getTableNameFromOptions(options)
    val sparkSaveMode = getSparkSaveMode(saveMode)
    val connectionProperties = new JDBCOptions(url,
      tableName,
      propertiesWithCustom.mapValues(_.toString).filter(_._2.nonEmpty)
    )

    Try {
      if (sparkSaveMode == SaveMode.Overwrite) SpartaJdbcUtils.dropTable(url, connectionProperties, tableName)

      SpartaJdbcUtils.tableExists(url, connectionProperties, tableName, dataFrame.schema)
    } match {
      case Success(tableExists) =>
        if (tableExists)
          if (saveMode == SaveModeEnum.Upsert) {
            val updateFields = getPrimaryKeyOptions(options) match {
              case Some(pk) => pk.split(",").toSeq
              case None => dataFrame.schema.fields.filter(stField =>
                stField.metadata.contains(Output.PrimaryKeyMetadataKey)).map(_.name).toSeq
            }
            SpartaJdbcUtils.upsertTable(dataFrame, url, tableName, connectionProperties, updateFields)
          } else {
            dataFrame.foreachPartition { rows =>
              val conn = getConnection(connectionProperties)
              val cm = new CopyManager(conn.asInstanceOf[BaseConnection])

              cm.copyIn(
                s"""COPY $tableName FROM STDIN WITH (NULL 'null', ENCODING '$encoding', FORMAT CSV, DELIMITER E'$delimiter')""",
                rowsToInputStream(rows)
              )
            }
          }
        else log.warn(s"Table not created in Postgres: $tableName")
      case Failure(e) =>
        closeConnection()
        log.error(s"Error creating/dropping table $tableName", e)
    }
  }

  def rowsToInputStream(rows: Iterator[Row]): InputStream = {
    val bytes: Iterator[Byte] = rows.flatMap { row =>
      (row.mkString(delimiter).replace("\n", newLineSubstitution) + "\n").getBytes(encoding)
    }

    new InputStream {
      override def read(): Int =
        if (bytes.hasNext) bytes.next & 0xff
        else -1
    }
  }

  override def cleanUp(options: Map[String, String]): Unit = {
    log.info(s"Closing connections in Postgres Output: $name")
    closeConnection()
  }
} 
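
rowsToInputStream is the notable part of this output: rows are encoded lazily, one at a time, so the COPY payload never has to be materialized in memory. A self-contained sketch of the same trick, with illustrative data:

import java.io.InputStream

object CopyStreamSketch extends App {
  val lines = Iterator("a\t1", "b\t2")
  // Each line is turned into bytes only when the consumer pulls them
  val bytes: Iterator[Byte] = lines.flatMap(line => (line + "\n").getBytes("UTF8"))
  val in: InputStream = new InputStream {
    override def read(): Int = if (bytes.hasNext) bytes.next & 0xff else -1
  }
  print(scala.io.Source.fromInputStream(in).mkString) // prints the two tab-separated lines
}
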
Example 151
Source File: TwitterJsonInput.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.input.twitter

import java.io.{Serializable => JSerializable}

import com.google.gson.Gson
import com.stratio.sparta.sdk.pipeline.input.Input
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.twitter.TwitterUtils
import twitter4j.TwitterFactory
import twitter4j.conf.ConfigurationBuilder

import scala.util.{Failure, Success, Try}


class TwitterJsonInput(properties: Map[String, JSerializable]) extends Input(properties) {

  System.setProperty("twitter4j.oauth.consumerKey", properties.getString("consumerKey"))
  System.setProperty("twitter4j.oauth.consumerSecret", properties.getString("consumerSecret"))
  System.setProperty("twitter4j.oauth.accessToken", properties.getString("accessToken"))
  System.setProperty("twitter4j.oauth.accessTokenSecret", properties.getString("accessTokenSecret"))

  val cb = new ConfigurationBuilder()
  val tf = new TwitterFactory(cb.build())
  val twitterApi = tf.getInstance()
  val trends = twitterApi.getPlaceTrends(1).getTrends.map(trend => trend.getName)
  val terms: Option[Seq[String]] = Try(properties.getString("termsOfSearch")) match {
    case Success("") => None
    case Success(t: String) => Some(t.split(",").toSeq)
    case Failure(_) => None
  }
  val search = terms.getOrElse(trends.toSeq)

  def initStream(ssc: StreamingContext, sparkStorageLevel: String): DStream[Row] = {
    TwitterUtils.createStream(ssc, None, search, storageLevel(sparkStorageLevel))
      .map(stream => {
        val gson = new Gson()
        Row(gson.toJson(stream))
      }
      )
  }
} 
Example 152
Source File: GeoParser.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.transformation.geo

import java.io.{Serializable => JSerializable}

import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import com.stratio.sparta.sdk.pipeline.transformation.{Parser, WhenError}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType

import scala.util.{Failure, Success, Try}

class GeoParser(
                 order: Integer,
                 inputField: Option[String],
                 outputFields: Seq[String],
                 schema: StructType,
                 properties: Map[String, JSerializable]
               ) extends Parser(order, inputField, outputFields, schema, properties) {

  val defaultLatitudeField = "latitude"
  val defaultLongitudeField = "longitude"
  val separator = "__"

  val latitudeField = properties.getOrElse("latitude", defaultLatitudeField).toString
  val longitudeField = properties.getOrElse("longitude", defaultLongitudeField).toString

  def parse(row: Row): Seq[Row] = {
    val newData = Try {
      val geoValue = geoField(getLatitude(row), getLongitude(row))
      outputFields.map(outputField => {
        val outputSchemaValid = outputFieldsSchema.find(field => field.name == outputField)
        outputSchemaValid match {
          case Some(outSchema) =>
            TypeOp.transformValueByTypeOp(outSchema.dataType, geoValue)
          case None =>
            returnWhenError(
              throw new IllegalStateException(s"Impossible to parse outputField: $outputField in the schema"))
        }
      })
    }

    returnData(newData, removeInputField(row))
  }

  private def getLatitude(row: Row): String = {
    val latitude = Try(row.get(schema.fieldIndex(latitudeField)))
      .getOrElse(throw new RuntimeException(s"Impossible to parse $latitudeField in the event: ${row.mkString(",")}"))

    latitude match {
      case valueCast: String => valueCast
      case valueCast: Array[Byte] => new Predef.String(valueCast)
      case _ => latitude.toString
    }
  }

  private def getLongitude(row: Row): String = {
    val longitude = Try(row.get(schema.fieldIndex(longitudeField)))
      .getOrElse(throw new RuntimeException(s"Impossible to parse $latitudeField in the event: ${row.mkString(",")}"))

    longitude match {
      case valueCast: String => valueCast
      case valueCast: Array[Byte] => new Predef.String(valueCast)
      case _ => longitude.toString
    }
  }

  private def geoField(latitude: String, longitude: String): String = latitude + separator + longitude
} 
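
Hypothetical usage against a three-column schema, assuming the Sparta SDK and Spark are on the classpath; with default properties the parser reads the latitude and longitude columns and appends latitude__longitude as the geo output field:

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object GeoParserSketch extends App {
  val schema = StructType(Seq(
    StructField("latitude", StringType),
    StructField("longitude", StringType),
    StructField("geo", StringType)))
  val parser = new GeoParser(1, None, Seq("geo"), schema, Map.empty)
  println(parser.parse(Row("40.4", "-3.7"))) // roughly List([40.4,-3.7,40.4__-3.7])
}
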
Example 153
Source File: JsonSource.scala    From play-json-schema-validator   with Apache License 2.0 5 votes vote down vote up
package com.eclipsesource.schema

import java.io.InputStream
import java.net.URL

import play.api.libs.json._

import scala.io.Source
import scala.util.{Failure, Success, Try}


object JsonSource { // other members (e.g. fromUrl) are omitted in this excerpt

  def schemaFromUrl(url: URL)(implicit reads: Reads[SchemaType]): JsResult[SchemaType] = {
    for {
      schemaJson <- JsonSource.fromUrl(url) match {
        case Success(json) => JsSuccess(json)
        case Failure(throwable) => JsError(throwable.getMessage)
      }
      schema <- Json.fromJson[SchemaType](schemaJson)
    } yield schema
  }
} 
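
A hypothetical call, with an illustrative URL; the implicit Reads[SchemaType] is assumed to be in scope via the library's com.eclipsesource.schema._ imports:

import java.net.URL
import play.api.libs.json.{JsError, JsSuccess}

object SchemaFromUrlSketch extends App {
  JsonSource.schemaFromUrl(new URL("http://example.com/schema.json")) match {
    case JsSuccess(schema, _) => println(s"Loaded schema: $schema")
    case JsError(errors)      => println(s"Invalid schema document: $errors")
  }
}
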
Example 154
Source File: DonutRoute.scala    From scala-for-beginners   with Apache License 2.0 5 votes vote down vote up
package com.allaboutscala.donutstore.routes

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.allaboutscala.donutstore.common.Donut
import com.allaboutscala.donutstore.config.DonutStoreConfig
import com.allaboutscala.donutstore.data.DataApi
import com.allaboutscala.donutstore.marshalling.JsonSupport

import scala.util.{Failure, Success}



class DonutRoute extends HttpRoute with JsonSupport {
  override def routes()(implicit config: DonutStoreConfig, dataApi: DataApi): Route = {
    path("add-donut") {
      post {
        entity(as[Donut]) { donut =>
          onComplete(dataApi.createDonut(donut)) {
            case Success(createMessage) => complete(StatusCodes.Created, createMessage)
            case Failure(ex) => complete(s"Failed to create donut, exception = ${ex.getMessage}")
          }
        }
      } ~
      get {
        complete(StatusCodes.MethodNotAllowed)
      }
    } ~
      path("donuts") {
      get {
        onComplete(dataApi.fetchDonuts()) {
          case Success(donuts) => complete(StatusCodes.OK, donuts)
          case Failure(ex) => complete(s"Failed to fetch donuts, exception = ${ex.getMessage}")
        }
      }
    } ~
    path("donuts" / Segment) { donutName =>
      delete {
        onComplete(dataApi.deleteDonut(donutName)) {
          case Success(deleteMessage) => complete(StatusCodes.OK, deleteMessage)
          case Failure(ex) => complete(s"Failed to delete donut, exception = ${ex.getMessage}")
        }
      }
    } ~
    path("donuts" / Segment) { donutName =>
      post {
        parameter("ingredients") { ingredients =>
          val donutIngredients = ingredients.split(",").toList
          val donut = Donut(donutName, donutIngredients)
          onComplete(dataApi.updateDonutIngredients(donut)) {
            case Success(updateMessage) => complete(StatusCodes.OK, updateMessage)
            case Failure(ex) => complete(s"Failed to update ingredients, exception = ${ex.getMessage}")
          }
        }
      }
    }
  }
} 
Example 155
Source File: DonutStoreHttpController.scala    From scala-for-beginners   with Apache License 2.0 5 votes vote down vote up
package com.allaboutscala.donutstore.httpserver

import akka.http.scaladsl.Http
import com.typesafe.scalalogging.LazyLogging

import scala.io.StdIn
import scala.util.{Failure, Success}



trait DonutStoreHttpController extends LazyLogging {
  this: DonutStoreServices =>

  def startAndBind(): Unit = {
    logger.info("Initializing and binding Akka HTTP server")
    val httpServerFuture = Http().bindAndHandle(donutApiRoutes, cfg.httpServer.ip, cfg.httpServer.port)
    httpServerFuture.onComplete {
      case Success(binding) =>
        logger.info(s"Akka Http Server is bound to ${binding.localAddress}")
        logger.info(s"To stop the server, press the [Enter] key in IntelliJ's console.")

      case Failure(e) =>
        logger.error(s"Akka Http server failed to bind to ${cfg.httpServer.ip}:${cfg.httpServer.port}",e)
        system.terminate()
    }

    // pressing enter key will kill the server
    StdIn.readLine()
    for {
      serverBinding <- httpServerFuture
      _             <- serverBinding.unbind()
      terminated    <- system.terminate()
    } yield logger.info(s"Akka Http server was terminated = $terminated")
  }
} 
Example 156
Source File: DonutRoute.scala    From scala-for-beginners   with Apache License 2.0 5 votes vote down vote up
package com.allaboutscala.donutstore.httpserver.routes

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import com.allaboutscala.donutstore.common.Donut
import com.allaboutscala.donutstore.common.marshalling.JsonSupport
import com.allaboutscala.donutstore.config.DonutStoreConfig
import com.allaboutscala.donutstore.data.DataApi

import scala.util.{Failure, Success}



class DonutRoute extends HttpRoute with JsonSupport {
  override def routes()(implicit config: DonutStoreConfig, dataApi: DataApi): Route = {
    path("add-donut") {
      post {
        entity(as[Donut]) { donut =>
          onComplete(dataApi.createDonut(donut)) {
            case Success(createMessage) => complete(StatusCodes.Created, createMessage)
            case Failure(ex) => complete(s"Failed to create donut, exception = ${ex.getMessage}")
          }
        }
      } ~
        get {
          complete(StatusCodes.MethodNotAllowed)
        }
    } ~
      path("donuts") {
        get {
          onComplete(dataApi.fetchDonuts()) {
            case Success(donuts) => complete(StatusCodes.OK, donuts)
            case Failure(ex) => complete(s"Failed to fetch donuts, exception = ${ex.getMessage}")
          }
        }
      } ~
      path("donuts" / Segment) { donutName =>
        delete {
          onComplete(dataApi.deleteDonut(donutName)) {
            case Success(deleteMessage) => complete(StatusCodes.OK, deleteMessage)
            case Failure(ex) => complete(s"Failed to delete donut, exception = ${ex.getMessage}")
          }
        }
      } ~
      path("donuts" / Segment) { donutName =>
        post {
          parameter("ingredients") { ingredients =>
            val donutIngredients = ingredients.split(",").toList
            val donut = Donut(donutName, donutIngredients)
            onComplete(dataApi.updateDonutIngredients(donut)) {
              case Success(updateMessage) => complete(StatusCodes.OK, updateMessage)
              case Failure(ex) => complete(s"Failed to update ingredients, exception = ${ex.getMessage}")
            }
          }
        }
      }
  }
} 
Example 158
Source File: Main.scala    From hammock   with MIT License 5 votes vote down vote up
package examplenode

import hammock._
import hammock.marshalling._
import hammock.circe.implicits._
import io.circe.generic.auto._
import cats.effect.IO
import scala.util.{Failure, Success}
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import hammock.fetch.Interpreter._

object Main {
  implicit val cs = IO.contextShift(queue)

  def main(args: Array[String]): Unit = {

    val endpoint = uri"http://www.mocky.io/v2/5185415ba171ea3a00704eed"
    case class Req(name: String, number: Int)
    case class Resp(hello: String)
    val request = Hammock
      .request(Method.POST, endpoint, Map(), Some(Req("name", 4)))
      .as[Resp]
      .exec[IO]
    request.unsafeToFuture.onComplete {
      case Success(resp) => println("hello: " + resp)
      case Failure(ex)   => println(ex)
    }
  }
} 
Example 159
Source File: slick.scala    From freestyle   with Apache License 2.0 5 votes vote down vote up
package freestyle.free

import _root_.slick.dbio.{DBIO, DBIOAction}
import _root_.slick.jdbc.JdbcBackend
import freestyle.free.async._
import freestyle.async.AsyncContext

import scala.util.{Failure, Success}

import scala.concurrent.{ExecutionContext, Future}

object slick {

  @free sealed trait SlickM {
    def run[A](f: DBIO[A]): FS[A]
  }

  trait Implicits {
    implicit def freeStyleSlickHandler[M[_]](
        implicit asyncContext: AsyncContext[M],
        db: JdbcBackend#DatabaseDef,
        ec: ExecutionContext): SlickM.Handler[M] =
      new SlickM.Handler[M] {
        def run[A](fa: DBIO[A]): M[A] = asyncContext.runAsync { cb =>
          db.run(fa).onComplete {
            case Success(x) => cb(Right(x))
            case Failure(e) => cb(Left(e))
          }
        }
      }

    implicit def freeStyleSlickFutureHandler(
        implicit db: JdbcBackend#DatabaseDef,
        ec: ExecutionContext): SlickM.Handler[Future] =
      new SlickM.Handler[Future] {
        def run[A](fa: DBIO[A]): Future[A] = db.run(fa)
      }

    implicit def freeSLiftSlick[F[_]: SlickM]: FreeSLift[F, DBIO] =
      new FreeSLift[F, DBIO] {
        def liftFSPar[A](dbio: DBIO[A]): FreeS.Par[F, A] = SlickM[F].run(dbio)
      }
  }

  object implicits extends Implicits
} 
Example 160
Source File: FederationServer.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk

import java.net.HttpURLConnection.HTTP_NOT_FOUND

import com.typesafe.scalalogging.LazyLogging
import okhttp3.{Headers, HttpUrl, OkHttpClient, Request}
import org.json4s.native.{JsonMethods, Serialization}
import org.json4s.{Formats, NoTypeHints}
import stellar.sdk.inet.RestException
import stellar.sdk.model.response.{FederationResponse, FederationResponseDeserialiser}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

case class FederationServer(base: HttpUrl) extends LazyLogging {

  implicit val formats: Formats = Serialization.formats(NoTypeHints) + FederationResponseDeserialiser
  private val client = new OkHttpClient()
  private val headers = Headers.of(
    "X-Client-Name", BuildInfo.name,
    "X-Client-Version", BuildInfo.version)

  def byName(name: String)(implicit ec: ExecutionContext): Future[Option[FederationResponse]] =
    fetchFederationResponse(base.newBuilder()
      .addQueryParameter("q", name)
      .addQueryParameter("type", "name")
      .build(),  _.copy(address = name))

  def byAccount(account: PublicKey)(implicit ec: ExecutionContext): Future[Option[FederationResponse]] =
    fetchFederationResponse(base.newBuilder()
      .addQueryParameter("q", account.accountId)
      .addQueryParameter("type", "id")
      .build(), _.copy(account = account))


  private def fetchFederationResponse(url: HttpUrl, fillIn: FederationResponse => FederationResponse)
                                     (implicit ec: ExecutionContext): Future[Option[FederationResponse]] =
    Future(client.newCall(new Request.Builder().url(url).headers(headers).build()).execute())
      .map { response =>
        response.code() match {
          case HTTP_NOT_FOUND => None
          case e if e >= 500 => throw RestException(response.body().string())
          case _ =>
            Try(response.body().string())
              .map(JsonMethods.parse(_))
              .map(_.extract[FederationResponse])
              .map(fillIn)
              .map(validate) match {
              case Success(fr) => Some(fr)
              case Failure(t) => throw RestException("Could not parse document as FederationResponse.", t)
            }
        }
      }


  private def validate(fr: FederationResponse): FederationResponse = {
    if (fr.account == null) throw RestException(s"Document did not contain account_id")
    if (fr.address == null) throw RestException(s"Document did not contain stellar_address")
    fr
  }
}

object FederationServer {
  def apply(uriString: String): FederationServer = new FederationServer(HttpUrl.parse(uriString))
} 
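
A hypothetical lookup, with an illustrative federation endpoint and address; byName resolves a Stellar address asynchronously and yields None on HTTP 404:

import scala.concurrent.ExecutionContext.Implicits.global

object FederationLookupSketch extends App {
  val server = FederationServer("https://example.org/federation") // illustrative endpoint
  server.byName("alice*example.org").foreach {
    case Some(resp) => println(s"Resolved: $resp")
    case None       => println("No federation record found")
  }
  // a real program would await the Future before the JVM exits
}
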
Example 161
Source File: TransactionLedgerEntriesSpec.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.ledger

import com.typesafe.scalalogging.LazyLogging
import org.specs2.mutable.Specification

import scala.util.{Failure, Try}

class TransactionLedgerEntriesSpec extends Specification with LedgerEntryGenerators with LazyLogging {

  "a ledger entry" should {
    "serde to/from XDR" >> prop { entries: TransactionLedgerEntries =>
      val triedEntries = Try(TransactionLedgerEntries.decode.run(entries.encode).value._2)
      triedEntries match {
        case Failure(_) => logger.error(s"Failed to decode $entries")
        case _ =>
      }
      triedEntries must beSuccessfulTry(entries)
    }
  }

} 
Example 162
Source File: LedgerEntrySpec.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.ledger

import com.typesafe.scalalogging.LazyLogging
import org.specs2.mutable.Specification

import scala.util.{Failure, Try}

class LedgerEntrySpec extends Specification with LedgerEntryGenerators with LazyLogging {

  "a ledger entry" should {
    "serde to/from XDR" >> prop { entry: LedgerEntry =>
      val triedEntry = Try(LedgerEntry.decode.run(entry.encode).value._2)
      triedEntry match {
        case Failure(_) => logger.error(s"Failed to decode $entry")
        case _ =>
      }
      triedEntry must beSuccessfulTry(entry)
    }
  }

} 
Example 163
Source File: LedgerEntryChangeSpec.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.ledger

import com.typesafe.scalalogging.LazyLogging
import org.specs2.mutable.Specification

import scala.util.{Failure, Try}

class LedgerEntryChangeSpec extends Specification with LedgerEntryGenerators with LazyLogging {

  "a ledger entry change" should {
    "serde to/from XDR" >> prop { change: LedgerEntryChange =>
      val triedChange = Try(LedgerEntryChange.decode.run(change.encode).value._2)
      triedChange match {
        case Failure(_) => logger.error(s"Failed to decode $change")
        case _ =>
      }
      triedChange must beSuccessfulTry(change)
    }
  }

} 
Example 164
Source File: ShellExecutor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.examples.distributedshell

import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.Actor
import org.slf4j.Logger

import org.apache.gearpump.cluster.{ExecutorContext, UserConfig}
import org.apache.gearpump.examples.distributedshell.DistShellAppMaster.{ShellCommand, ShellCommandResult}
import org.apache.gearpump.util.LogUtil


class ShellExecutor(executorContext: ExecutorContext, userConf: UserConfig) extends Actor {
  import executorContext._
  private val LOG: Logger = LogUtil.getLogger(getClass, executor = executorId, app = appId)

  LOG.info(s"ShellExecutor started!")

  override def receive: Receive = {
    case ShellCommand(command) =>
      val process = Try(s"$command".!!)
      val result = process match {
        case Success(msg) => msg
        case Failure(ex) => ex.getMessage
      }
      sender ! ShellCommandResult(executorId, result)
  }
} 
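The core of this executor is the Try-around-.!! idiom: scala.sys.process's .!! returns stdout on exit code 0 and throws otherwise, so Success/Failure cleanly separates the two outcomes. A standalone sketch of the same idiom:

import scala.sys.process._
import scala.util.{Failure, Success, Try}

// stdout on success; the exception message (e.g. "Nonzero exit value: 1")
// when the command fails, mirroring ShellExecutor's receive block.
def runShell(command: String): String =
  Try(command.!!) match {
    case Success(stdout) => stdout
    case Failure(ex)     => ex.getMessage
  }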
Example 165
Source File: ShellExecutorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.examples.distributedshell

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.scalatest.{Matchers, WordSpec}

import org.apache.gearpump.cluster.appmaster.WorkerInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{ExecutorContext, TestUtil, UserConfig}
import org.apache.gearpump.examples.distributedshell.DistShellAppMaster.{ShellCommand, ShellCommandResult}

class ShellExecutorSpec extends WordSpec with Matchers {

  "ShellExecutor" should {
    "execute the shell command and return the result" in {
      val executorId = 1
      val workerId = WorkerId(2, 0L)
      val appId = 0
      val appName = "app"
      val resource = Resource(1)
      implicit val system = ActorSystem("ShellExecutor", TestUtil.DEFAULT_CONFIG)
      val mockMaster = TestProbe()(system)
      val worker = TestProbe()
      val workerInfo = WorkerInfo(workerId, worker.ref)
      val executorContext = ExecutorContext(executorId, workerInfo, appId, appName,
        mockMaster.ref, resource)
      val executor = system.actorOf(Props(classOf[ShellExecutor], executorContext,
        UserConfig.empty))

      val process = Try(s"ls /".!!)
      val result = process match {
        case Success(msg) => msg
        case Failure(ex) => ex.getMessage
      }
      executor.tell(ShellCommand("ls /"), mockMaster.ref)
      assert(mockMaster.receiveN(1).head.asInstanceOf[ShellCommandResult].equals(
        ShellCommandResult(executorId, result)))

      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 166
Source File: DistServiceExecutor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.distributeservice

import java.io.{File, FileWriter}
import java.net.InetAddress
import scala.collection.JavaConverters._
import scala.io.Source
import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.Actor
import org.apache.commons.io.FileUtils
import org.apache.commons.lang.text.StrSubstitutor
import org.slf4j.Logger

import org.apache.gearpump.cluster.{ExecutorContext, UserConfig}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.InstallService
import org.apache.gearpump.util.{ActorUtil, LogUtil}

class DistServiceExecutor(executorContext: ExecutorContext, userConf: UserConfig) extends Actor {
  import executorContext._
  private val LOG: Logger = LogUtil.getLogger(getClass, executor = executorId, app = appId)

  override def receive: Receive = {
    case InstallService(url, zipFileName, targetPath, scriptData, serviceName, serviceSettings) =>
      LOG.info(s"Executor $executorId receive command to install " +
        s"service $serviceName to $targetPath")
      unzipFile(url, zipFileName, targetPath)
      installService(scriptData, serviceName, serviceSettings)
  }

  private def unzipFile(url: String, zipFileName: String, targetPath: String) = {
    val zipFile = File.createTempFile(System.currentTimeMillis().toString, zipFileName)
    val dir = new File(targetPath)
    if (dir.exists()) {
      FileUtils.forceDelete(dir)
    }
    val bytes = FileServer.newClient.get(url).get
    FileUtils.writeByteArrayToFile(zipFile, bytes)
    val result = Try(s"unzip ${zipFile.getAbsolutePath} -d $targetPath".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor $executorId unzip file to $targetPath")
      case Failure(ex) => throw ex
    }
  }

  private def installService(
      scriptData: Array[Byte], serviceName: String, serviceSettings: Map[String, Any]) = {
    val tempFile = File.createTempFile("gearpump", serviceName)
    FileUtils.writeByteArrayToFile(tempFile, scriptData)
    val script = new File("/etc/init.d", serviceName)
    writeFileWithEnvVariables(tempFile, script, serviceSettings ++ getEnvSettings)
    val result = Try(s"chkconfig --add $serviceName".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor install service $serviceName successfully!")
      case Failure(ex) => throw ex
    }
  }

  private def getEnvSettings: Map[String, Any] = {
    Map("workerId" -> worker,
      "localhost" -> ActorUtil.getSystemAddress(context.system).host.get,
      "hostname" -> InetAddress.getLocalHost.getHostName)
  }

  private def writeFileWithEnvVariables(source: File, target: File, envs: Map[String, Any]) = {
    val writer = new FileWriter(target)
    val sub = new StrSubstitutor(envs.asJava)
    sub.setEnableSubstitutionInVariables(true)
    Source.fromFile(source).getLines().foreach(line => writer.write(sub.replace(line) + "\r\n"))
    writer.close()
  }
} 
Example 167
Source File: DistributeServiceClient.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.distributeservice

import java.io.File
import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.pattern.ask
import org.apache.commons.io.FileUtils

import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer, InstallService}
import org.apache.gearpump.util.{AkkaApp, Constants}


object DistributeServiceClient extends AkkaApp with ArgumentsParser {
  implicit val timeout = Constants.FUTURE_TIMEOUT

  override val options: Array[(String, CLIOption[Any])] = Array(
    "appid" -> CLIOption[Int]("<the distributed shell appid>", required = true),
    "file" -> CLIOption[String]("<service zip file path>", required = true),
    "script" -> CLIOption[String](
      "<file path of service script that will be installed to /etc/init.d>", required = true),
    "serviceName" -> CLIOption[String]("<service name>", required = true),
    "target" -> CLIOption[String]("<target path on each machine>", required = true)
  )

  override def help(): Unit = {
    super.help()
    // scalastyle:off println
    Console.err.println(s"-D<name>=<value> set a property to the service")
    // scalastyle:on println
  }

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(filterCustomOptions(args))
    val context = ClientContext(akkaConf)
    implicit val system = context.system
    implicit val dispatcher = system.dispatcher
    val appid = config.getInt("appid")
    val zipFile = new File(config.getString("file"))
    val script = new File(config.getString("script"))
    val serviceName = config.getString("serviceName")
    val appMaster = context.resolveAppID(appid)
    (appMaster ? GetFileContainer).asInstanceOf[Future[FileContainer]].map { container =>
      val bytes = FileUtils.readFileToByteArray(zipFile)
      val result = FileServer.newClient.save(container.url, bytes)
      result match {
        case Success(_) =>
          appMaster ! InstallService(container.url, zipFile.getName, config.getString("target"),
            FileUtils.readFileToByteArray(script), serviceName, parseServiceConfig(args))
          context.close()
        case Failure(ex) => throw ex
      }
    }
  }

  private def filterCustomOptions(args: Array[String]): Array[String] = {
    args.filter(!_.startsWith("-D"))
  }

  private def parseServiceConfig(args: Array[String]): Map[String, Any] = {
    val result = Map.empty[String, Any]
    args.foldLeft(result) { (result, argument) =>
      if (argument.startsWith("-D") && argument.contains("=")) {
        val fixedKV = argument.substring(2).split("=")
        result + (fixedKV(0) -> fixedKV(1))
      } else {
        result
      }
    }
  }
} 
Example 168
Source File: Util.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.integrationtest

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

import org.apache.log4j.Logger

object Util {

  private val LOG = Logger.getLogger(getClass)

  def encodeUriComponent(s: String): String = {
    try {
      java.net.URLEncoder.encode(s, "UTF-8")
        .replaceAll("\\+", "%20")
        .replaceAll("\\%21", "!")
        .replaceAll("\\%27", "'")
        .replaceAll("\\%28", "(")
        .replaceAll("\\%29", ")")
        .replaceAll("\\%7E", "~")
    } catch {
      case ex: Throwable => s
    }
  }

  def retryUntil(
      condition: () => Boolean, conditionDescription: String, maxTries: Int = 15,
      interval: Duration = 10.seconds): Unit = {
    var met = false
    var tries = 0

    while (!met && tries < maxTries) {

      met = Try(condition()) match {
        case Success(true) => true
        case Success(false) => false
        case Failure(ex) => false
      }

      tries += 1

      if (!met) {
        LOG.error(s"Failed due to (false == $conditionDescription), " +
          s"retrying (attempt $tries)...")
        Thread.sleep(interval.toMillis)
      } else {
        LOG.info(s"Success ($conditionDescription) after $tries attempts")
      }
    }

    if (!met) {
      throw new Exception(s"Failed after ${tries} retries, ($conditionDescription) == false")
    }
  }
} 
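retryUntil treats a thrown exception the same as a false condition, so callers can poll flaky endpoints without writing their own try/catch. A hedged usage sketch, where restClient and its query method are hypothetical:

// Poll until the service answers, using the default 15 tries at a
// 10-second interval; throws after the last failed attempt.
Util.retryUntil(
  () => restClient.queryVersion().nonEmpty,
  "version endpoint answers")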
Example 169
Source File: SimpleKafkaReader.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.integrationtest.kafka

import scala.util.{Failure, Success}

import kafka.api.FetchRequestBuilder
import kafka.consumer.SimpleConsumer
import kafka.utils.Utils

import org.apache.gearpump.streaming.serializer.ChillSerializer

class SimpleKafkaReader(verifier: ResultVerifier, topic: String, partition: Int = 0,
    host: String, port: Int) {

  private val consumer = new SimpleConsumer(host, port, 100000, 64 * 1024, "")
  private val serializer = new ChillSerializer[Int]
  private var offset = 0L

  def read(): Unit = {
    val messageSet = consumer.fetch(
      new FetchRequestBuilder().addFetch(topic, partition, offset, Int.MaxValue).build()
    ).messageSet(topic, partition)

    for (messageAndOffset <- messageSet) {
      serializer.deserialize(Utils.readBytes(messageAndOffset.message.payload)) match {
        case Success(msg) =>
          offset = messageAndOffset.nextOffset
          verifier.onNext(msg)
        case Failure(e) => throw e
      }
    }
  }
} 
Example 170
Source File: SupervisorService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.Materializer

import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.ClientToMaster._
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.services.SupervisorService.{Path, Status}
import org.apache.gearpump.util.ActorUtil._
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


// Class declaration reconstructed: the snippet omits it, so the signature
// below is an assumption based on the gearpump services module.
class SupervisorService(val master: ActorRef, val supervisor: ActorRef,
    override val system: ActorSystem) extends BasicService {

  import upickle.default.write

  private def authorize(internal: Route): Route = {
    if (supervisor == null) {
      failWith(new Exception("API not enabled, cannot find a valid supervisor! " +
        "Please make sure Gearpump is running on top of YARN or other resource managers"))
    } else {
      internal
    }
  }

  protected override def doRoute(implicit mat: Materializer) = pathPrefix("supervisor") {
    pathEnd {
      get {
        val path = if (supervisor == null) {
          null
        } else {
          supervisor.path.toString
        }
        complete(write(Path(path)))
      }
    } ~
    path("status") {
      post {
        if (supervisor == null) {
          complete(write(Status(enabled = false)))
        } else {
          complete(write(Status(enabled = true)))
        }
      }
    } ~
    path("addworker" / IntNumber) { workerCount =>
      post {
        authorize {
          onComplete(askActor[CommandResult](supervisor, AddWorker(workerCount))) {
            case Success(value) =>
              complete(write(value))
            case Failure(ex) =>
              failWith(ex)
          }
        }
      }
    } ~
    path("removeworker" / Segment) { workerIdString =>
      post {
        authorize {
          val workerId = WorkerId.parse(workerIdString)
          def future(): Future[CommandResult] = {
            askWorker[WorkerData](master, workerId, GetWorkerData(workerId)).flatMap{workerData =>
              val containerId = workerData.workerDescription.resourceManagerContainerId
              askActor[CommandResult](supervisor, RemoveWorker(containerId))
            }
          }

          onComplete[CommandResult](future()) {
            case Success(value) =>
              complete(write(value))
            case Failure(ex) =>
              failWith(ex)
          }
        }
      }
    }
  }
}

object SupervisorService {
  case class Status(enabled: Boolean)

  case class Path(path: String)
} 
Example 171
Source File: WorkerService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.util.{Failure, Success}

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.stream.Materializer

import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.ClientToMaster.{QueryHistoryMetrics, QueryWorkerConfig, ReadOption}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.MasterToClient.{HistoryMetrics, WorkerConfig}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorUtil._
import org.apache.gearpump.util.Constants
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class WorkerService(val master: ActorRef, override val system: ActorSystem)
  extends BasicService {

  import upickle.default.write
  private val systemConfig = system.settings.config
  private val concise = systemConfig.getBoolean(Constants.GEARPUMP_SERVICE_RENDER_CONFIG_CONCISE)

  protected override def doRoute(implicit mat: Materializer) = pathPrefix("worker" / Segment) {
    workerIdString => {
      pathEnd {
        val workerId = WorkerId.parse(workerIdString)
        onComplete(askWorker[WorkerData](master, workerId, GetWorkerData(workerId))) {
          case Success(value: WorkerData) =>
            complete(write(value.workerDescription))
          case Failure(ex) => failWith(ex)
        }
      }
    } ~
    path("config") {
      val workerId = WorkerId.parse(workerIdString)
      onComplete(askWorker[WorkerConfig](master, workerId, QueryWorkerConfig(workerId))) {
        case Success(value: WorkerConfig) =>
          val config = Option(value.config).map(ClusterConfig.render(_, concise)).getOrElse("{}")
          complete(config)
        case Failure(ex) =>
          failWith(ex)
      }
    } ~
    path("metrics" / RemainingPath ) { path =>
      val workerId = WorkerId.parse(workerIdString)
      parameter(ReadOption.Key ? ReadOption.ReadLatest) { readOption =>
        val query = QueryHistoryMetrics(path.head.toString, readOption)
        onComplete(askWorker[HistoryMetrics](master, workerId, query)) {
          case Success(value) =>
            complete(write(value))
          case Failure(ex) =>
            failWith(ex)
        }
      }
    }
  }
} 
Example 172
Source File: FileSystem.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.yarn.glue

import java.io.{InputStream, OutputStream}
import java.net.ConnectException

import org.apache.gearpump.util.LogUtil
import org.apache.hadoop.fs.Path

import scala.util.{Failure, Success, Try}

class FileSystem(yarnConfig: YarnConfig) {

  private val conf = yarnConfig.conf
  private val fs = org.apache.hadoop.fs.FileSystem.get(conf)

  private def LOG = LogUtil.getLogger(getClass)

  def open(file: String): InputStream = exceptionHandler {
    val path = new Path(file)
    fs.open(path)
  }

  def create(file: String): OutputStream = exceptionHandler {
    val path = new Path(file)
    fs.create(path)
  }

  def exists(file: String): Boolean = exceptionHandler {
    val path = new Path(file)
    fs.exists(path)
  }

  def name: String = {
    fs.getUri.toString
  }

  def getHomeDirectory: String = {
    fs.getHomeDirectory.toString
  }

  private def exceptionHandler[T](call: => T): T = {
    val callTry = Try(call)
    callTry match {
      case Success(v) => v
      case Failure(ex) =>
        if (ex.isInstanceOf[ConnectException]) {
          LOG.error("Please check whether we connect to the right HDFS file system, " +
            "current file system is $name." + "\n. Please copy all configs under " +
            "$HADOOP_HOME/etc/hadoop into conf/yarnconf directory of Gearpump package, " +
            "so that we can use the right File system.", ex)
        }
        throw ex
    }
  }
} 
Example 173
Source File: RunningApplication.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.client

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import org.apache.gearpump.cluster.ClientToMaster.{RegisterAppResultListener, ResolveAppId, ShutdownApplication}
import org.apache.gearpump.cluster.MasterToClient._
import org.apache.gearpump.cluster.client.RunningApplication._
import org.apache.gearpump.util.{ActorUtil, LogUtil}
import org.slf4j.Logger
import java.time.Duration
import java.util.concurrent.TimeUnit

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

class RunningApplication(val appId: Int, master: ActorRef, timeout: Timeout) {
  lazy val appMaster: Future[ActorRef] = resolveAppMaster(appId)

  def shutDown(): Unit = {
    val result = ActorUtil.askActor[ShutdownApplicationResult](master,
      ShutdownApplication(appId), timeout)
    result.appId match {
      case Success(_) =>
      case Failure(ex) => throw ex
    }
  }

  
  def waitUntilFinish(): Unit = {
    this.waitUntilFinish(INF_DURATION)
  }

  def waitUntilFinish(duration: Duration): Unit = {
    val result = ActorUtil.askActor[ApplicationResult](master,
      RegisterAppResultListener(appId), new Timeout(duration.getSeconds, TimeUnit.SECONDS))
    if (result.appId == appId) {
      result match {
        case failed: ApplicationFailed =>
          throw failed.error
        case _: ApplicationSucceeded =>
          LOG.info(s"Application $appId succeeded")
        case _: ApplicationTerminated =>
          LOG.info(s"Application $appId terminated")
      }
    } else {
      LOG.warn(s"Received unexpected result $result for application $appId")
    }
  }

  def askAppMaster[T](msg: Any): Future[T] = {
    appMaster.flatMap(_.ask(msg)(timeout).asInstanceOf[Future[T]])
  }

  private def resolveAppMaster(appId: Int): Future[ActorRef] = {
    master.ask(ResolveAppId(appId))(timeout).
      asInstanceOf[Future[ResolveAppIdResult]].map(_.appMaster.get)
  }
}

object RunningApplication {
  private val LOG: Logger = LogUtil.getLogger(getClass)
  // This magic number comes from Akka's configuration; it is the maximum delay the scheduler supports
  private val INF_DURATION = Duration.ofSeconds(2147482)
} 
Example 174
Source File: Util.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.util

import java.io.{BufferedReader, File, FileInputStream, InputStreamReader}
import java.net.{ServerSocket, URI}
import scala.concurrent.forkjoin.ThreadLocalRandom
import scala.sys.process.Process
import scala.util.{Failure, Success, Try}

import com.typesafe.config.{Config, ConfigFactory}

import org.apache.gearpump.cluster.AppJar
import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.transport.HostPort

object Util {
  val LOG = LogUtil.getLogger(getClass)
  private val defaultUri = new URI("file:///")
  private val appNamePattern = "^[a-zA-Z_][a-zA-Z0-9_]+$".r.pattern

  def validApplicationName(appName: String): Boolean = {
    appNamePattern.matcher(appName).matches()
  }

  def getCurrentClassPath: Array[String] = {
    val classpath = System.getProperty("java.class.path")
    val classpathList = classpath.split(File.pathSeparator)
    classpathList
  }

  def version: String = {
    val home = System.getProperty(Constants.GEARPUMP_HOME)
    val version = Try {
      val versionFile = new FileInputStream(new File(home, "VERSION"))
      val reader = new BufferedReader(new InputStreamReader(versionFile))
      val version = reader.readLine().replace("version:=", "")
      versionFile.close()
      version
    }
    version match {
      case Success(version) =>
        version
      case Failure(ex) =>
        LOG.error("failed to read VERSION file, " + ex.getMessage)
        "Unknown-Version"
    }
  }

  def startProcess(options: Array[String], classPath: Array[String], mainClass: String,
      arguments: Array[String]): RichProcess = {
    val java = System.getProperty("java.home") + "/bin/java"

    val command = List(java) ++ options ++
      List("-cp", classPath.mkString(File.pathSeparator), mainClass) ++ arguments
    LOG.info(s"Starting executor process java $mainClass ${arguments.mkString(" ")} " +
      s"\n ${options.mkString(" ")}")
    val logger = new ProcessLogRedirector()
    val process = Process(command).run(logger)
    new RichProcess(process, logger)
  }

  
  def resolveJvmSetting(conf: Config): AppJvmSettings = {

    import org.apache.gearpump.util.Constants._

    val appMasterVMArgs = Try(conf.getString(GEARPUMP_APPMASTER_ARGS).split("\\s+")
      .filter(_.nonEmpty)).toOption
    val executorVMArgs = Try(conf.getString(GEARPUMP_EXECUTOR_ARGS).split("\\s+")
      .filter(_.nonEmpty)).toOption

    val appMasterClassPath = Try(
      conf.getString(GEARPUMP_APPMASTER_EXTRA_CLASSPATH)
        .split("[;:]").filter(_.nonEmpty)).toOption

    val executorClassPath = Try(
      conf.getString(GEARPUMP_EXECUTOR_EXTRA_CLASSPATH)
        .split(File.pathSeparator).filter(_.nonEmpty)).toOption

    AppJvmSettings(
      JvmSetting(appMasterVMArgs.getOrElse(Array.empty[String]),
        appMasterClassPath.getOrElse(Array.empty[String])),
      JvmSetting(executorVMArgs
        .getOrElse(Array.empty[String]), executorClassPath.getOrElse(Array.empty[String])))
  }

  def asSubDirOfGearpumpHome(dir: String): File = {
    new File(System.getProperty(Constants.GEARPUMP_HOME), dir)
  }
} 
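resolveJvmSetting shows the Try(...).toOption idiom for optional Typesafe Config keys: a missing key raises a ConfigException, which toOption collapses to None. A minimal sketch of the same pattern (the helper name is hypothetical):

import com.typesafe.config.Config
import scala.util.Try

// Missing or malformed key -> None; present key -> Some(parsed JVM args).
def optionalArgs(conf: Config, key: String): Option[Array[String]] =
  Try(conf.getString(key).split("\\s+").filter(_.nonEmpty)).toOption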
Example 175
Source File: RunningApplicationSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.client

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import akka.util.Timeout
import org.apache.gearpump.cluster.ClientToMaster.{ResolveAppId, ShutdownApplication}
import org.apache.gearpump.cluster.MasterToClient.{ResolveAppIdResult, ShutdownApplicationResult}
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.client.RunningApplicationSpec.{MockAskAppMasterRequest, MockAskAppMasterResponse}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global

class RunningApplicationSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = _

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  "RunningApplication" should "be able to shutdown application" in {
    val errorMsg = "mock exception"
    val master = TestProbe()
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val application = new RunningApplication(1, master.ref, timeout)
    Future {
      application.shutDown()
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Success(1)))

    val result = Future {
      intercept[Exception] {
        application.shutDown()
      }
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Failure(new Exception(errorMsg))))
    val exception = Await.result(result, Duration.Inf)
    assert(exception.getMessage.equals(errorMsg))
  }

  "RunningApplication" should "be able to ask appmaster" in {
    val master = TestProbe()
    val appMaster = TestProbe()
    val appId = 1
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val request = MockAskAppMasterRequest("request")
    val application = new RunningApplication(appId, master.ref, timeout)
    val future = application.askAppMaster[MockAskAppMasterResponse](request)
    master.expectMsg(ResolveAppId(appId))
    master.reply(ResolveAppIdResult(Success(appMaster.ref)))
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result = Await.result(future, Duration.Inf)
    assert(result.res.equals("response"))

    // ResolveAppId should not be called multiple times
    val future2 = application.askAppMaster[MockAskAppMasterResponse](request)
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result2 = Await.result(future2, Duration.Inf)
    assert(result2.res.equals("response"))
  }
}

object RunningApplicationSpec {
  case class MockAskAppMasterRequest(req: String)

  case class MockAskAppMasterResponse(res: String)
} 
Example 176
Source File: MongoUtils.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo

import reactivemongo.api.MongoConnection.{ParsedURI, URIParsingException}
import reactivemongo.api.{DefaultDB, MongoConnection, MongoDriver}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}

// fixme remove global context
import scala.concurrent.ExecutionContext.Implicits.global

object MongoUtils {

  private val defaultPort: Int = 27017
  private lazy val mongoDriver = new MongoDriver()

  private def establishConnection(uri: ParsedURI, dbName: String, connectionTimeout: FiniteDuration): DefaultDB = {
    Await.result(establishConnection(uri, dbName), connectionTimeout)
  }

  private def establishConnection(uri: ParsedURI, dbName: String): Future[DefaultDB] =
    Try(mongoDriver.connection(uri).database(dbName)) match {
      case Success(db) => db
      case Failure(err) =>
        throw new IllegalStateException(s"Can't connect to database ${printHosts(uri.hosts)}: ${err.getMessage}", err)
    }

  private def printHosts(hosts: List[(String, Int)]): String = hosts.map { case (host, port) => s"$host:$port" }.mkString(", ")

  def connectToDB(uri: ParsedURI, connectionTimeout: FiniteDuration): DefaultDB =
    uri.db match {
      case Some(dbName) => establishConnection(uri, dbName, connectionTimeout)
      case None => throw new IllegalStateException(s"Can't connect to database $uri.")
    }

  def connectToDB(uri: String, connectionTimeout: FiniteDuration): DefaultDB =  connectToDB(parseUri(uri), connectionTimeout)

  def parseHosts(hosts: Seq[String]): Seq[(String, Int)] = hosts.map { hostAndPort =>
    hostAndPort.split(':').toList match {
      case host :: port :: Nil =>
        host -> Try(port.toInt).filter(p => p > 0 && p < 65536)
          .getOrElse(throw new URIParsingException(s"Could not parse hosts '$hosts' from URI: invalid port '$port'"))
      case host :: Nil =>
        host -> defaultPort
      case _ => throw new URIParsingException(s"Could not parse hosts from URI: invalid definition '$hosts'")
    }
  }

  def parseUri(uri: String): ParsedURI = {
    MongoConnection.parseURI(uri) match {
      case Success(parsedUri) => parsedUri
      case Failure(err) => throw new IllegalStateException(s"Can't parse database uri. $err")
    }
  }
} 
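Put together, parseUri and connectToDB give a fail-fast, blocking bootstrap: every Failure is rethrown as an IllegalStateException before the simulation starts. A usage sketch, with a hypothetical connection string:

import scala.concurrent.duration._

// Hypothetical URI; the database name after the '/' is mandatory, since
// connectToDB throws when uri.db is None.
val db = MongoUtils.connectToDB("mongodb://localhost:27017/gatling", 10.seconds)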
Example 177
Source File: MongoRawCommandAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoRawCommand
import com.ringcentral.gatling.mongo.response.MongoStringResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.StatsEngine
import play.api.libs.json.JsObject
import reactivemongo.api.commands.Command
import reactivemongo.api.{DefaultDB, FailoverStrategy, ReadPreference}
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.JSONSerializationPack

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoRawCommandAction(command: MongoRawCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo raw command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] = for {
    commandText <- command.command(session)
    commandDocument <- string2JsObject(commandText)
  } yield {
    val sent = nowMillis
    val runner = Command.run(JSONSerializationPack, FailoverStrategy.default)
    runner.apply(database, runner.rawCommand(commandDocument)).one[JsObject](ReadPreference.primaryPreferred).onComplete {
      case Success(result) => processResult(session, sent, nowMillis, command.checks, MongoStringResponse(result.toString()), next, commandName)
      case Failure(err) => executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
    }
  }
} 
Example 178
Source File: MongoUpdateAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoUpdateCommand
import com.ringcentral.gatling.mongo.response.MongoCountResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.StatsEngine
import reactivemongo.api.DefaultDB
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.collection.JSONCollection

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoUpdateAction(command: MongoUpdateCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo update command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] = for {
    collectionName <- command.collection(session)
    resolvedSelector <- command.selector(session)
    resolvedModifier <- command.modifier(session)
    upsert <- command.upsert(session)
    multi <- command.multi(session)
    selector <- string2JsObject(resolvedSelector)
    modifier <- string2JsObject(resolvedModifier)
  } yield {
    val sent = nowMillis
    database.collection[JSONCollection](collectionName).update(selector, modifier, upsert = upsert, multi = multi).onComplete {
      case Success(result) =>
        if (result.ok) {
          processResult(session, sent, nowMillis, command.checks, MongoCountResponse(result.n), next, commandName)
        } else {
          executeNext(session, sent, nowMillis, KO, next, commandName, Some(result.writeErrors.map(we => s"[${we.code}] ${we.errmsg}").mkString(", ")))
        }
      case Failure(err) =>
        executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
    }

  }
} 
Example 179
Source File: MongoAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.check.MongoCheck
import com.ringcentral.gatling.mongo.response.MongoResponse
import io.gatling.commons.stats.{KO, OK, Status}
import io.gatling.commons.validation
import io.gatling.commons.validation.{NoneSuccess, Validation}
import io.gatling.core.action.{Action, ExitableAction}
import io.gatling.core.check.Check
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.message.ResponseTimings
import io.gatling.core.util.NameGen
import play.api.libs.json._
import reactivemongo.api.DefaultDB
import reactivemongo.api.collections.GenericQueryBuilder
import reactivemongo.play.json.JSONSerializationPack

import scala.util.{Failure, Success, Try}

abstract class MongoAction(database: DefaultDB) extends ExitableAction with NameGen {

  def commandName: Expression[String]

  def executeCommand(commandName: String, session: Session): Validation[Unit]

  override def execute(session: Session): Unit = recover(session) {
    commandName(session).flatMap { resolvedCommandName =>
      val outcome = executeCommand(resolvedCommandName, session)
      outcome.onFailure(errorMessage => statsEngine.reportUnbuildableRequest(session, resolvedCommandName, errorMessage))
      outcome
    }
  }

  def string2JsObject(string: String): Validation[JsObject] = {
    Try[JsObject](Json.parse(string).as[JsObject]) match {
      case Success(json) => validation.SuccessWrapper(json).success
      case Failure(err) =>
        validation.FailureWrapper(s"Error parse JSON string: $string. ${err.getMessage}").failure
    }
  }

  def string2JsObject(optionString: Option[String]): Validation[Option[JsObject]] =
    optionString match {
      case Some(string) => string2JsObject(string).map(Some.apply)
      case None => NoneSuccess
    }

  protected def executeNext(session: Session,
                            sent: Long,
                            received: Long,
                            status: Status,
                            next: Action,
                            requestName: String,
                            message: Option[String]): Unit = {
    val timings = ResponseTimings(sent, received)
    statsEngine.logResponse(session, requestName, timings, status, None, message)
    next ! session
  }

  protected def processResult(session: Session,
                              sent: Long,
                              received: Long,
                              checks: List[MongoCheck],
                              response: MongoResponse,
                              next: Action,
                              requestName: String): Unit = {
    // run all the checks, advise the Gatling API that it is complete and move to next
    val (checkSaveUpdate, error) = Check.check(response, session, checks)
    val newSession = checkSaveUpdate(session)
    error match {
      case Some(validation.Failure(errorMessage)) => executeNext(newSession.markAsFailed, sent, received, KO, next, requestName, Some(errorMessage))
      case _ => executeNext(newSession, sent, received, OK, next, requestName, None)
    }
  }

  implicit class GenericQueryBuilderExt(b: GenericQueryBuilder[JSONSerializationPack.type]) {

    def sort(sort: Option[JsObject]): GenericQueryBuilder[JSONSerializationPack.type] = {
      sort.map(b.sort).getOrElse(b)
    }

    def hint(sort: Option[JsObject]): GenericQueryBuilder[JSONSerializationPack.type] = {
      sort.map(b.hint).getOrElse(b)
    }
  }

} 
Example 180
Source File: MongoInsertAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoInsertCommand
import com.ringcentral.gatling.mongo.response.MongoCountResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.StatsEngine
import reactivemongo.api.DefaultDB
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.collection.JSONCollection

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoInsertAction(command: MongoInsertCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo insert command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] = for {
    collectionName <- command.collection(session)
    resolvedDocument <- command.document(session)
    document <- string2JsObject(resolvedDocument)
  } yield {
    val sent = nowMillis
    database.collection[JSONCollection](collectionName).insert(document).onComplete {
      case Success(result) =>
        if (result.ok) {
          processResult(session, sent, nowMillis, command.checks, MongoCountResponse(result.n), next, commandName)
        } else {
          executeNext(session, sent, nowMillis, KO, next, commandName, Some(result.writeErrors.map(we => s"[${we.code}] ${we.errmsg}").mkString(", ")))
        }
      case Failure(err) =>
        executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
    }

  }
} 
Example 181
Source File: MongoCountAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoCountCommand
import com.ringcentral.gatling.mongo.response.MongoCountResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation._
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session, _}
import io.gatling.core.stats.StatsEngine
import reactivemongo.api.DefaultDB
import reactivemongo.play.json.collection.JSONCollection

//TODO remove global context everywhere
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoCountAction(command: MongoCountCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo count command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] =
    for {
      collectionName <- command.collection(session)
      selectorDocument <- resolveOptionalExpression(command.selector, session)
      hint <- resolveOptionalExpression(command.hint, session)
      selector <- selectorDocument match {
        case Some(d) => string2JsObject(d).map(Some.apply)
        case None => NoneSuccess
      }
    } yield {
      val sent = nowMillis
      val collection: JSONCollection = database.collection[JSONCollection](collectionName)
      collection.count(selector, command.limit, command.skip, hint).onComplete {
        case Success(result) => processResult(session, sent, nowMillis, command.checks, MongoCountResponse(result), next, commandName)
        case Failure(err) => executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
      }

    }
} 
Example 182
Source File: MongoRemoveAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoRemoveCommand
import com.ringcentral.gatling.mongo.response.MongoCountResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.StatsEngine
import reactivemongo.api.DefaultDB
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.collection.JSONCollection

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoRemoveAction(command: MongoRemoveCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo find command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] = for {
    collectionName <- command.collection(session)
    resolvedSelector <- command.selector(session)
    selector <- string2JsObject(resolvedSelector)
  } yield {
    val sent = nowMillis
    database.collection[JSONCollection](collectionName).remove(selector).onComplete {
      case Success(result) =>
        if (result.ok) {
          processResult(session, sent, nowMillis, command.checks, MongoCountResponse(result.n), next, commandName)
        } else {
          executeNext(session, sent, nowMillis, KO, next, commandName, Some(result.writeErrors.map(we => s"[${we.code}] ${we.errmsg}").mkString(", ")))
        }

      case Failure(err) => executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))

    }

  }
} 
Example 183
Source File: MongoFindAction.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo.action

import com.ringcentral.gatling.mongo.command.MongoFindCommand
import com.ringcentral.gatling.mongo.response.MongoStringResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session, resolveOptionalExpression}
import io.gatling.core.stats.StatsEngine
import play.api.libs.json.JsObject
import reactivemongo.api.{DefaultDB, QueryOpts, ReadPreference}
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.collection.JSONCollection
import reactivemongo.play.json.collection.JsCursor._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class MongoFindAction(command: MongoFindCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo find command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] =
    for {
      collectionName <- command.collection(session)
      resolvedFilter <- command.query(session)
      filter <- string2JsObject(resolvedFilter)
      resolvedHint <- resolveOptionalExpression(command.hint, session)
      hint <- string2JsObject(resolvedHint)
      resolvedSort <- resolveOptionalExpression(command.sort, session)
      sort <- string2JsObject(resolvedSort)
    } yield {
      val sent = nowMillis
      database.collection[JSONCollection](collectionName).find(filter).options(QueryOpts().batchSize(command.limit)).sort(sort).hint(hint)
        .cursor[JsObject](ReadPreference.primary).jsArray(command.limit).onComplete {
        case Success(result) => processResult(session, sent, nowMillis, command.checks, MongoStringResponse(result.toString()), next, commandName)
        case Failure(err) => executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
      }
    }
} 
Example 184
Source File: SparkKernelClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import org.apache.toree.comm._
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.execution.{DeferredExecution, ExecuteRequestTuple}
import org.apache.toree.kernel.protocol.v5.client.socket.HeartbeatMessage
import org.apache.toree.kernel.protocol.v5.client.socket.StdinClient.{ResponseFunctionMessage, ResponseFunction}
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.apache.toree.utils.LogLike
import scala.concurrent.duration._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}


// Class declaration reconstructed: the snippet omits it, so the constructor
// arguments and ask timeout below are assumptions based on the toree client.
class SparkKernelClient(
    private val actorLoader: ActorLoader,
    private val actorSystem: ActorSystem,
    commRegistrar: CommRegistrar
) extends LogLike {
  implicit val timeout = Timeout(21474835.seconds)

  val comm = new ClientCommManager(
    actorLoader = actorLoader,
    kmBuilder = KMBuilder(),
    commRegistrar = commRegistrar
  )

  // TODO: hide this? just heartbeat to see if kernel is reachable?
  def heartbeat(failure: () => Unit): Unit = {
    val future = actorLoader.load(SocketType.Heartbeat) ? HeartbeatMessage

    future.onComplete {
      case Success(_) =>
        logger.info("Client received heartbeat.")
      case Failure(_) =>
        failure()
        logger.info("There was an error receiving heartbeat from kernel.")
    }
  }

  def shutdown() = {
    logger.info("Shutting down client")
    actorSystem.terminate()
  }
} 
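heartbeat reports success only in the log and surfaces failure solely through the callback. A hedged usage sketch, assuming a fully constructed client instance:

// 'client' is a hypothetical, fully constructed SparkKernelClient.
client.heartbeat(() => {
  // Invoked when the heartbeat future fails, i.e. the kernel is unreachable.
  client.shutdown()
})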
Example 185
Source File: MagicManager.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.magic

import org.apache.toree.plugins.{Plugin, PluginMethodResult, PluginManager}
import org.slf4j.LoggerFactory

import scala.annotation.tailrec
import scala.language.dynamics
import scala.runtime.BoxedUnit
import scala.util.{Try, Failure, Success}

class MagicManager(private val pluginManager: PluginManager) extends Dynamic {
  protected val logger = LoggerFactory.getLogger(this.getClass.getName)
  
  @throws[MagicNotFoundException]
  def findMagic(name: String): Magic = {
    @tailrec def inheritsMagic(klass: Class[_]): Boolean = {
      if (klass == null) false
      else if (klass.getInterfaces.exists(classOf[Magic].isAssignableFrom)) true
      else inheritsMagic(klass.getSuperclass)
    }

    val magics = pluginManager.plugins
      .filter(p => inheritsMagic(p.getClass))
      .filter(_.simpleName.split("\\.").last.toLowerCase == name.toLowerCase)

    if (magics.isEmpty) {
      logger.error(s"No magic found for $name!")
      throw new MagicNotFoundException(name)
    } else if (magics.size > 1) {
      logger.warn(s"More than one magic found for $name!")
    }

    magics.head.asInstanceOf[Magic]
  }

  @throws[MagicNotFoundException]
  def applyDynamic(name: String)(args: Any*): MagicOutput = {
    val arg = args.headOption.map(_.toString).getOrElse("")

    import org.apache.toree.plugins.Implicits._
    val result = pluginManager.fireEventFirstResult(
      name.toLowerCase(),
      "input" -> arg
    )

    result match {
      case Some(r: PluginMethodResult) => handleMagicResult(name, r.toTry)
      case None => throw new MagicNotFoundException(name)
    }
  }

  private def handleMagicResult(name: String, result: Try[Any]): MagicOutput = result match {
    case Success(magicOutput) => magicOutput match {
      case out: MagicOutput => out
      case null | _: BoxedUnit => MagicOutput()
      case cmo: Map[_, _]
        if cmo.keys.forall(_.isInstanceOf[String]) &&
          cmo.values.forall(_.isInstanceOf[String]) =>
        MagicOutput(cmo.asInstanceOf[Map[String, String]].toSeq:_*)
      case unknown =>
        val message =
          s"""Magic $name did not return proper magic output
             |type. Expected ${classOf[MagicOutput].getName}, but found
             |type of ${unknown.getClass.getName}.""".trim.stripMargin
        logger.warn(message)
        MagicOutput("text/plain" -> message)
    }
    case Failure(t) =>
      val message =  s"Magic $name failed to execute with error: \n${t.getMessage}"
      logger.warn(message, t)
      MagicOutput("text/plain" -> message)
  }
} 
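Because MagicManager extends Dynamic, any method call that is not statically defined is routed through applyDynamic and fired as a plugin event. A hedged sketch, where the magic name and manager instance are hypothetical:

// Fires the "timer" event with "start" as its input; a registered Timer
// magic would handle it, otherwise MagicNotFoundException is thrown.
val output: MagicOutput = magicManager.timer("start")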
Example 186
Source File: Main.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory }

object Main extends App
    with RequestTimeout {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host") // fetch the host and port from the configuration
  val port = config.getInt("http.port")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher  // bindAndHandle is asynchronous and needs an implicit ExecutionContext

  val api = new RestApi(system, requestTimeout(config)).routes // RestApi provides the HTTP routes
 
  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port) // start the HTTP server with the RestApi routes
 
  val log =  Logging(system.eventStream, "go-ticks")
  bindingFuture.map { serverBinding =>
    log.info(s"RestApi bound to ${serverBinding.localAddress} ")
  }.onComplete { 
    case Success(v) => // bound successfully; already logged above
    case Failure(ex) =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
} 
Example 187
Source File: Startup.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.ActorSystem
import akka.event.Logging

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Route

import akka.stream.ActorMaterializer

trait Startup extends RequestTimeout {
  def startup(api: Route)(implicit system: ActorSystem) = {
    val host = system.settings.config.getString("http.host") // fetch the host and port from the configuration
    val port = system.settings.config.getInt("http.port")
    startHttpServer(api, host, port)
  }

  def startHttpServer(api: Route, host: String, port: Int)
      (implicit system: ActorSystem) = {
    implicit val ec = system.dispatcher  // bindAndHandle needs an implicit ExecutionContext
    implicit val materializer = ActorMaterializer()
    val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port) // start the HTTP server
   
    val log = Logging(system.eventStream, "go-ticks")
    bindingFuture.map { serverBinding =>
      log.info(s"RestApi bound to ${serverBinding.localAddress} ")
    }.onComplete { 
      case Success(v) => // bound successfully; already logged above
      case Failure(ex) =>
        log.error(ex, "Failed to bind to {}:{}!", host, port)
        system.terminate()
    }
  }
} 
Example 188
Source File: EchoActorTest.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.testdriven

import akka.testkit.{ TestKit, ImplicitSender }
import akka.actor.{ Props, Actor, ActorSystem }
import org.scalatest.WordSpecLike

import akka.util.Timeout
import scala.concurrent.Await
import scala.util.{ Success, Failure }

import scala.language.postfixOps


class EchoActorTest extends TestKit(ActorSystem("testsystem"))
  with WordSpecLike
  with ImplicitSender
  with StopSystemAfterAll {


  "An EchoActor" must {
    "Reply with the same message it receives" in {

      import akka.pattern.ask
      import scala.concurrent.duration._
      implicit val timeout = Timeout(3 seconds)
      implicit val ec = system.dispatcher
      val echo = system.actorOf(Props[EchoActor], "echo1")
      val future = echo.ask("some message")
      future.onComplete {
        case Failure(_)   => // handle failure
        case Success(msg) => // handle success
      }

      Await.ready(future, timeout.duration)
    }

    "Reply with the same message it receives without ask" in {
      val echo = system.actorOf(Props[EchoActor], "echo2")
      echo ! "some message"
      expectMsg("some message")

    }

  }
}


class EchoActor extends Actor {
  def receive = {
    case msg =>
      sender() ! msg
  }
} 
Example 189
Source File: LogJson.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString

import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling 
    with NotificationMarshalling 
    with MetricMarshalling {
  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
    .map(_.decodeString("UTF8"))
    .map(LogStreamProcessor.parseLineEx)
    .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject) 
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject) 

  val jsonOutFlow = Flow[Event].map { event => 
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws => 
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m => 
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map{ event => 
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
} 
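The line-framing step inside textInFlow can be tried in isolation; a self-contained sketch over an in-memory source (the object name and sample data are illustrative):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Framing, Sink, Source}
import akka.util.ByteString

object FramingDemo extends App {
  implicit val system = ActorSystem("framing-demo")
  implicit val materializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  Source.single(ByteString("first\nsecond\n"))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024))
    .map(_.decodeString("UTF8"))
    .runWith(Sink.foreach(println)) // prints "first" then "second"
    .onComplete(_ => system.terminate())
}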
Example 190
Source File: GridWriter.scala    From scala-plotly-client   with MIT License 5 votes vote down vote up
package co.theasi.plotly.writer

import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.JsonDSL._

import scala.util.{ Try, Success, Failure }

import co.theasi.plotly.Grid

object GridWriter {

  def draw(
      grid: Grid,
      fileName: String,
      fileOptions: FileOptions = FileOptions()
  )(implicit server: Server): GridFile = {
    if(fileOptions.overwrite) { deleteIfExists(fileName) }
    val request = Api.post("grids",
      compact(render(gridAsJson(grid, fileName))))
    val parsedResponse = Api.despatchAndInterpret(request)
    GridFile.fromResponse(parsedResponse \ "file")
  }

  private def gridAsJson(grid: Grid, fileName: String): JObject = {
    val columnsAsJson = grid.columns.toIterator.zipWithIndex.map {
      case ((columnName, columnValues), index) =>
        ColumnWriter.toJson(columnValues, columnName, index)
    }
    val jsonObj = columnsAsJson.reduce {
      (memo, current) => memo ~ current
    }
    (
      ("data" -> ("cols" -> jsonObj)) ~
      ("filename" -> fileName)
    )
  }

  private def deleteIfExists(fileName: String)(implicit server: Server): Unit = {
    Try { GridFile.fromFileName(fileName) } match {
      case Success(grid) => // exists already -> delete
        Api.despatchAndInterpret(Api.delete(s"grids/${grid.fileId}"))
      case Failure(PlotlyException("Not found.")) => // good to go
      case Failure(e) => throw e // some other error -> re-throw
    }
  }

} 
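deleteIfExists works because matching Failure(PlotlyException("Not found.")) destructures the exception; that presumes PlotlyException has a case-class-style extractor. The same shape with a stand-in exception type:

import scala.util.{Failure, Success, Try}

case class DemoException(msg: String) extends RuntimeException(msg)

object FailureMatchDemo extends App {
  val lookup: Try[Int] = Failure(DemoException("Not found."))
  lookup match {
    case Success(v)                           => println(s"found $v")
    case Failure(DemoException("Not found.")) => println("absent, carry on")
    case Failure(other)                       => throw other // unexpected error
  }
}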
Example 191
Source File: FavouriteSong.scala    From PureCSV   with Apache License 2.0 5 votes vote down vote up
package purecsv.example.favouritesong

import au.com.bytecode.opencsv.CSVParser
import org.apache.spark.Accumulator
import org.apache.spark.AccumulatorParam.LongAccumulatorParam
import org.apache.spark.rdd.RDD
import org.joda.time.Period
import org.joda.time.format.ISOPeriodFormat
import purecsv.safe._
import purecsv.safe.converter.{RawFieldsConverter, StringConverter}

import scala.util.{Failure, Success, Try}


case class Song(title: String, artist: String, album: String, length: Period, likes: Int)


object FavouriteSong {

  // assumed: the OpenCSV parser instance that the truncated original defines
  val csvParser = new CSVParser()

  def tryParseSong(brokenAcc: Accumulator[Long], str: String): Option[Song] = {
    // we try to parse the record raw events and then, if successful, we try to convert
    // the raw fields to a Song instance
    val errorOrSong = Try(csvParser.parseLine(str)).flatMap(rawFields => RawFieldsConverter[Song].tryFrom(rawFields))

    errorOrSong match {
      case Success(song) => Some(song)
      case Failure(error) => { brokenAcc += 1; None }
    }
  }

} 
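tryParseSong is ordinary Try composition: parse the raw line, then flatMap into the field conversion, and only pattern match at the end. The same shape, self-contained and with illustrative names:

import scala.util.{Failure, Success, Try}

object ParseChainDemo extends App {
  def parseLine(s: String): Try[Array[String]] = Try(s.split(',').map(_.trim))
  def toPair(fields: Array[String]): Try[(String, Int)] =
    Try((fields(0), fields(1).toInt))

  // parse, then convert; either step failing short-circuits to Failure
  val errorOrPair = parseLine("answer, 42").flatMap(toPair)
  errorOrPair match {
    case Success(p)   => println(p) // (answer,42)
    case Failure(err) => println(s"broken record: $err")
  }
}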
Example 192
Source File: package.scala    From PureCSV   with Apache License 2.0 5 votes vote down vote up
package purecsv.safe.converter.defaults

import purecsv.safe.converter.StringConverter
import shapeless.{Generic, ::, HList, HNil}

import scala.util.{Failure, Success, Try}


package object rawfields {

  import purecsv.safe.converter.RawFieldsConverter

  def illegalConversion(what: String, typ: String) = {
    Failure(new IllegalArgumentException(s"$what cannot be converted to a value of type $typ"))
  }

  implicit val deriveHNil = new RawFieldsConverter[HNil] {
    override def tryFrom(s: Seq[String]): Try[HNil] = s match {
      case Nil => Success(HNil)
      case _       => illegalConversion(s.mkString("[",", ","]"), "HNil")
    }
    override def to(a: HNil): Seq[String] = Seq.empty
  }

  implicit def deriveHCons[V, T <: HList]
                          (implicit sc:  StringConverter[V],
                                   fto: RawFieldsConverter[T])
                                      : RawFieldsConverter[V :: T] = new RawFieldsConverter[V :: T] {
    override def tryFrom(s: Seq[String]): Try[V :: T] = s match {
      case Nil => illegalConversion("", classOf[V :: T].toString)
      case _   => for {
        head <- sc.tryFrom(s.head)
        tail <- fto.tryFrom(s.tail)
      } yield head :: tail
    }

    override def to(a: ::[V, T]): Seq[String] = sc.to(a.head) +: fto.to(a.tail)
  }

  implicit def deriveClass[A, R](implicit gen: Generic.Aux[A, R],
                                         conv: RawFieldsConverter[R])
                                             : RawFieldsConverter[A] = new RawFieldsConverter[A] {
    override def tryFrom(s: Seq[String]): Try[A] = conv.tryFrom(s).map(gen.from)
    override def to(a: A): Seq[String] = conv.to(gen.to(a))
  }
} 
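With these derivations (plus the default StringConverter instances that import purecsv.safe._ brings into scope), a converter for any case class of supported field types is summoned implicitly, as Example 191 does for Song. A small sketch; Person is an illustrative stand-in:

import purecsv.safe._
import purecsv.safe.converter.RawFieldsConverter

case class Person(name: String, age: Int)

object DeriveDemo extends App {
  println(RawFieldsConverter[Person].tryFrom(Seq("Ada", "36")))  // Success(Person(Ada,36))
  println(RawFieldsConverter[Person].tryFrom(Seq("Ada", "n/a"))) // Failure(NumberFormatException)
}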
Example 193
Source File: TryUtil.scala    From PureCSV   with Apache License 2.0 5 votes vote down vote up
package purecsv.safe

import scala.collection.immutable
import scala.util.{Failure, Success, Try}


package object tryutil {

  implicit class IterableOfTry[A](iter: Iterable[Try[A]]) {
    
    lazy val getSuccessesAndFailures: (immutable.List[(Int,A)],
                                       immutable.List[(Int,Throwable)]) = {
      val successes = scala.collection.mutable.Buffer[(Int,A)]()
      val failures = scala.collection.mutable.Buffer[(Int,Throwable)]()
      iter.zipWithIndex.foreach {
        case (Success(a),i) => successes += (i+1 -> a)
        case (Failure(f),i) => failures  += (i+1 -> f)
      }
      (successes.toList,failures.toList)
    }
  }

  implicit class IteratorOfTry[A](iter: Iterator[Try[A]]) extends IterableOfTry[A](iter.toIterable)

} 
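A usage sketch for getSuccessesAndFailures; note the recorded positions are 1-based, matching the i+1 in the implementation above:

import purecsv.safe.tryutil._
import scala.util.{Failure, Success, Try}

object TryUtilDemo extends App {
  val results: List[Try[Int]] =
    List(Success(1), Failure(new RuntimeException("bad")), Success(3))
  val (oks, errs) = results.getSuccessesAndFailures
  println(oks)  // List((1,1), (3,3))
  println(errs) // List((2,java.lang.RuntimeException: bad))
}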
Example 194
Source File: TryUtilSuite.scala    From PureCSV   with Apache License 2.0 5 votes vote down vote up
package purecsv.safe

import org.scalatest.{Matchers, FunSuite}

import scala.util.{Failure, Success}


class MyException(val s: String) extends RuntimeException(s) {
  override def equals(o: Any): Boolean = o match {
    case e:MyException => s.equals(e.s)
    case _ => false
  }
}

class TryUtilSuite extends FunSuite with Matchers {
  import tryutil._

  def failure(s: String) = Failure(new MyException(s))

  test("getSuccessesAndFailures partition an Iterator[Try[A]] into successes and failures") {
    val startingSuccesses = Seq(Success(1),Success(2))
    val startingFailures = Seq(failure("3"),failure("4"))
    val expectedSuccesses = Seq(1 -> 1, 2 -> 2)
    val expectedFailures = Seq(3 -> new MyException("3"), 4 -> new MyException("4"))
    val (resSuccesses,resFailures) = (startingSuccesses ++ startingFailures).getSuccessesAndFailures
    resSuccesses should be (expectedSuccesses)
    resFailures should be (expectedFailures)
  }
} 
Example 195
Source File: DbSerialization.scala    From mqtt-mongo   with MIT License 5 votes vote down vote up
package com.izmailoff.mm.util

import com.izmailoff.mm.config.GlobalAppConfig.Application.MqttMongo
import com.izmailoff.mm.config.SerializationFormat._
import com.mongodb.DBObject
import com.mongodb.casbah.commons.MongoDBObject
import org.bson.types.BasicBSONList

import scala.util.{Failure, Success, Try}

object DbSerialization {

  val PAYLOAD_FIELD = "payload"

  def serialize(payload: Array[Byte]): DBObject =
    MqttMongo.serializationFormat match {
      case JSON => serializeToJson(payload)
      case BINARY => serializeToBinary(payload)
      case STRING => serializeToString(payload)
    }

  def serializeToJson(payload: Array[Byte]) =
    parseSafe(new String(payload))

  def serializeToBinary(payload: Array[Byte]) =
    MongoDBObject(PAYLOAD_FIELD -> payload)

  def serializeToString(payload: Array[Byte]) =
    MongoDBObject(PAYLOAD_FIELD -> new String(payload))

  def parseSafe(msg: String): DBObject =
    Try {
      com.mongodb.util.JSON.parse(msg).asInstanceOf[DBObject]
    } match {
      case Failure(e) =>
        MongoDBObject(PAYLOAD_FIELD -> msg)
      case Success(json) =>
        json match {
          case j: BasicBSONList => MongoDBObject(PAYLOAD_FIELD -> json)
          case other => other
        }
    }
} 
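parseSafe never throws: malformed input lands in the Failure branch and is stored as a plain string under the payload field instead. A sketch of both paths (assumes the Casbah/MongoDB driver on the classpath):

import com.izmailoff.mm.util.DbSerialization._

object DbSerializationDemo extends App {
  println(parseSafe("""{"temp": 21.5}"""))  // a parsed DBObject
  println(parseSafe("not json at all"))     // { "payload" : "not json at all" }
}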
Example 196
Source File: Retry.scala    From spark-deployer   with Apache License 2.0 5 votes vote down vote up
package sparkdeployer

import scala.util.{Failure, Success, Try}
import org.slf4s.Logging

object Retry extends Logging {
  @annotation.tailrec
  def apply[T](op: Int => T, attempt: Int, maxAttempts: Int): T = {
    Try { op(attempt) } match {
      case Success(x) => x
      case Failure(e) if attempt < maxAttempts =>
        Thread.sleep(15000)
        apply(op, attempt + 1, maxAttempts)
      case Failure(e) => throw e
    }
  }
  def apply[T](op: Int => T)(implicit clusterConf: ClusterConf): T = apply(op, 1, clusterConf.retryAttempts)
} 
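A hedged usage sketch of the explicit-arguments overload; note that every failed attempt sleeps the hard-coded 15 seconds before retrying:

object RetryDemo extends App {
  // fails twice, then succeeds on the third attempt
  val result = Retry(attempt => {
    println(s"attempt $attempt")
    if (attempt < 3) sys.error("flaky") else "ok"
  }, attempt = 1, maxAttempts = 3)
  println(result) // "ok"
}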
Example 197
Source File: MerkleBuffersTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.node.networking.peer

import _root_.org.scalatest.compatible.Assertion
import org.bitcoins.core.protocol.blockchain.{Block, MerkleBlock}
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.util.FutureUtil
import org.bitcoins.node.{NodeCallbacks, OnMerkleBlockReceived}
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.testkit.BitcoinSTestAppConfig
import org.bitcoins.testkit.core.gen.{
  BlockchainElementsGenerator,
  TransactionGenerators
}
import org.bitcoins.testkit.util.BitcoinSAsyncTest
import org.scalacheck.Gen

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

class MerkleBuffersTest extends BitcoinSAsyncTest {

  implicit private val config: NodeAppConfig =
    BitcoinSTestAppConfig.getSpvTestConfig().nodeConf

  behavior of "MerkleBuffers"

  it must "match a merkle block with its corresponding transactions" in {

    val txsAndBlockGen: Gen[(Seq[Transaction], Seq[Transaction], Block)] = for {
      txs <- TransactionGenerators.nonEmptySmallTransactions
      otherTxs <- TransactionGenerators.nonEmptySmallTransactions
      block <- BlockchainElementsGenerator.block(txs)
    } yield (txs, otherTxs, block)

    forAllAsync(txsAndBlockGen) {

      case (txs, otherTxs, block) =>
        var receivedExpectedTXs: Option[Try[Assertion]] = None
        var callbackCount: Int = 0
        val callback: OnMerkleBlockReceived = { (_, merkleTxs) =>
          receivedExpectedTXs = Some(
            Try(assert(txs == merkleTxs,
                       "Received TXs in callback was not the ones we put in")))
          callbackCount = callbackCount + 1
          FutureUtil.unit
        }
        val callbacks = NodeCallbacks(onMerkleBlockReceived = Vector(callback))

        val merkle = MerkleBlock(block, txs.map(_.txId))
        val _ = MerkleBuffers.putMerkle(merkle)

        val txFs = txs.map { tx =>
          MerkleBuffers
            .putTx(tx, callbacks)
            .map(matches =>
              assert(
                matches,
                s"TX ${tx.txIdBE} did not match any merkle block in MerkleBuffers"))
        }

        val otherTxFs = otherTxs.map { tx =>
          MerkleBuffers
            .putTx(tx, callbacks)
            .map(matches =>
              assert(
                !matches,
                s"Unrelated TX ${tx.txIdBE} did match merkle block in MerkleBuffers"))
        }

        for {
          _ <- Future.sequence(txFs)
          _ <- Future.sequence(otherTxFs)
        } yield {
          assert(callbackCount != 0,
                 "Callback was not called after processing all TXs!")

          assert(callbackCount == 1,
                 s"Callback was called multiple times: $callbackCount")

          receivedExpectedTXs match {
            case None                     => fail("Callback was never called")
            case Some(Success(assertion)) => assertion
            case Some(Failure(exc))       => fail(exc)
          }
        }

    }

  }
} 
Example 198
Source File: RpcUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.util

import java.net.ServerSocket

import akka.actor.ActorSystem
import org.bitcoins.rpc.client.common.BitcoindRpcClient

import scala.annotation.tailrec
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Random, Success, Try}

abstract class RpcUtil extends AsyncUtil {

  def awaitServerShutdown(
      server: BitcoindRpcClient,
      duration: FiniteDuration = 300.milliseconds,
      maxTries: Int = 50)(implicit system: ActorSystem): Future[Unit] = {
    retryUntilSatisfiedF(() => server.isStoppedF, duration, maxTries)
  }

  
  @tailrec
  final def randomPort: Int = {
    val MAX = 65535 // max tcp port number
    val MIN = 1025 // lowest port not requiring sudo
    val port = Math.abs(Random.nextInt(MAX - MIN) + (MIN + 1))
    val attempt = Try {
      val socket = new ServerSocket(port)
      socket.close()
      socket.getLocalPort
    }

    attempt match {
      case Success(value) => value
      case Failure(_)     => randomPort
    }
  }
}

object RpcUtil extends RpcUtil 
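randomPort treats a successful ServerSocket bind as proof the port is free and recurses on Failure; a quick probe of that idea (racy by nature, since the socket is released before the caller rebinds):

import java.net.ServerSocket
import scala.util.{Failure, Success, Try}

object PortProbe extends App {
  val port = RpcUtil.randomPort
  // the port was free a moment ago, so binding it now usually succeeds
  Try(new ServerSocket(port)) match {
    case Success(socket) => println(s"bound $port"); socket.close()
    case Failure(e)      => println(s"race lost: $e")
  }
}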
Example 199
Source File: AddressTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.protocol

import org.bitcoins.testkit.core.gen.AddressGenerator
import org.bitcoins.testkit.util.{BitcoinSUnitTest, TestUtil}

import scala.util.{Failure, Success}

class AddressTest extends BitcoinSUnitTest {

  behavior of "Address"

  it must "have serialization symmetry" in {
    forAll(AddressGenerator.address) { addr =>
      val fromSPK = Address
        .fromScriptPubKeyT(addr.scriptPubKey, addr.networkParameters)
      fromSPK match {
        case Success(newAddr)   => assert(newAddr.value == addr.value)
        case Failure(exception) => fail(exception.getMessage)
      }

      val fromStringT = Address.fromStringT(addr.value)
      fromStringT match {
        case Success(newAddr)   => assert(newAddr.value == addr.value)
        case Failure(exception) => fail(exception.getMessage)
      }
    }
  }

  it must "serialize a bech32 address correctly" in {
    TestUtil.bech32Address.toString must be(
      "bcrt1qq6w6pu6zq90az9krn53zlkvgyzkyeglzukyepf")
  }
} 
Example 200
Source File: CompatEitherTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.compat

import org.bitcoins.chain.blockchain.BlockchainUpdate.Successful
import org.bitcoins.testkit.util.BitcoinSUnitTest

import scala.util.{Failure, Success}

class CompatEitherTest extends BitcoinSUnitTest {

  it should "create left and right" in {

    val right = Right("Right")
    val compatRight = CompatEither(right)
    assert(compatRight.isInstanceOf[CompatRight[Nothing, String]])
    assert(compatRight.toTry == Success("Right"))

    val exception = new RuntimeException("Left")
    val left = Left(exception)
    val compatLeft = CompatEither(left)
    assert(compatLeft.isInstanceOf[CompatLeft[RuntimeException, Nothing]])
    assert(compatLeft.toTry == Failure(exception))
  }

  it should "do traverse operations" in {
    val mappedRight = CompatEither(Right(12)).map(_ => "flower")
    assert(mappedRight == CompatEither(Right("flower")))
    val mappedLeft = CompatEither(Left(12)).map(_ => "flower")
    assert(mappedLeft == CompatEither(Left(12)))

    val flatmappedRight: CompatEither[Int, String] =
      CompatEither(Right(12)).flatMap(_ => CompatEither(Right("flower")))
    assert(flatmappedRight == CompatRight("flower"))
    val flatmappedLeft =
      CompatEither(Left(12)).flatMap(_ => CompatEither(Left("21")))
    assert(flatmappedLeft == CompatLeft(12))

    val foldedRight = CompatEither(Right(12)).fold(_ => "left", _ => "right")
    assert(foldedRight == "right")
    val foldedLeft = CompatEither(Left(12)).fold(_ => "left", _ => "right")
    assert(foldedLeft == "left")
  }

}