com.typesafe.config.Config Scala Examples

The following examples show how to use com.typesafe.config.Config in Scala. Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
Example 1
Source File: HasDex.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.dex

import java.util.Properties
import java.util.concurrent.ThreadLocalRandom

import cats.Functor
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.it.fp.CanExtract
import mouse.any._
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}

import scala.collection.JavaConverters._

trait HasDex { self: BaseContainersKit =>
  private val defaultTag = Option(System.getenv("DEX_TAG")).getOrElse("latest")

  protected implicit def toDexExplicitGetOps[F[_]: CanExtract: Functor](self: DexApi[F]): DexApiOps.ExplicitGetDexApiOps[F] = {
    new DexApiOps.ExplicitGetDexApiOps[F](self)
  }

  protected def dexInitialSuiteConfig: Config = ConfigFactory.empty()

  protected lazy val dexRunConfig: Config = dexQueueConfig(ThreadLocalRandom.current.nextInt(0, Int.MaxValue))

  protected def kafkaServer: Option[String] = Option { System.getenv("KAFKA_SERVER") }

  protected def dexQueueConfig(queueId: Int): Config = {
    kafkaServer.fold { ConfigFactory.empty() } { kafkaServer =>
      ConfigFactory.parseString(s"""waves.dex.events-queue {
                                   |  type = kafka
                                   |  kafka {
                                   |    servers = "$kafkaServer"
                                   |    topic = "dex-$queueId"
                                   |  }
                                   |}""".stripMargin)
    }
  }

  protected def createDex(name: String,
                          runConfig: Config = dexRunConfig,
                          suiteInitialConfig: Config = dexInitialSuiteConfig,
                          tag: String = defaultTag): DexContainer =
    DexContainer(name, networkName, network, getIp(name), runConfig, suiteInitialConfig, localLogsDir, tag) unsafeTap addKnownContainer

  lazy val dex1: DexContainer = createDex("dex-1")

  protected def createKafkaTopic(name: String): Unit = kafkaServer.foreach { server =>
    val properties = new Properties()
    properties.putAll(
      Map(
        "bootstrap.servers"  -> server,
        "group.id"           -> s"create-$name",
        "key.deserializer"   -> "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
      ).asJava
    )

    val adminClient = AdminClient.create(properties)

    try {
      val newTopic = new NewTopic(name, 1, 1.toShort)
      adminClient.createTopics(java.util.Collections.singletonList(newTopic))
    } finally {
      adminClient.close()
    }
  }
} 
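Because dexQueueConfig returns a plain Config, it can be layered with other configs using the standard withFallback merge. Below is a minimal standalone sketch of that merge; the object name, broker address and topic are illustrative and not part of the matcher project.

import com.typesafe.config.{Config, ConfigFactory}

object QueueConfigMergeDemo extends App {
  val queueConfig: Config = ConfigFactory.parseString(
    """waves.dex.events-queue {
      |  type = kafka
      |  kafka {
      |    servers = "localhost:9092"
      |    topic = "dex-42"
      |  }
      |}""".stripMargin)

  // Values already present in queueConfig win; the fallback only fills in missing paths.
  val merged = queueConfig.withFallback(ConfigFactory.parseString("waves.dex.events-queue.type = queue"))

  println(merged.getString("waves.dex.events-queue.type"))        // kafka
  println(merged.getString("waves.dex.events-queue.kafka.topic")) // dex-42
}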
Example 2
Source File: Launcher.scala    From sparkplug   with MIT License
package springnz.sparkplug.client

import java.net.{ URLEncoder, InetAddress }

import better.files._
import com.typesafe.config.{ ConfigRenderOptions, Config }
import org.apache.spark.launcher.SparkLauncher
import springnz.sparkplug.util.{ BuilderOps, ConfigUtils, Logging, Pimpers }

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{ Properties, Try }

object Launcher extends Logging {
  import BuilderOps._
  import Pimpers._

  def startProcess(launcher: SparkLauncher): Future[Unit] = {
    val processFuture = Future {
      launcher.launch()
    }.withErrorLog("Failed to launch: ")
    processFuture.flatMap {
      process ⇒ executeProcess(process)
    }
  }

  private def executeProcess(process: Process): Future[Unit] = Future {
    val outStream = scala.io.Source.fromInputStream(process.getInputStream)
    for (line ← outStream.getLines()) {
      log.info(line)
    }
    val errorStream = scala.io.Source.fromInputStream(process.getErrorStream)
    for (line ← errorStream.getLines()) {
      log.info(line)
    }
    process.waitFor()
  }

  def launch(clientAkkaAddress: String,
    jarPath: File,
    mainJarPattern: String,
    mainClass: String,
    sparkConfig: Config,
    akkaRemoteConfig: Option[Config],
    sendJars: Boolean = true): Try[Future[Unit]] = Try {

    val fullExtraJarFolder = jarPath.pathAsString

    val sparkHome = Properties.envOrNone("SPARK_HOME")
    val sparkMaster = Properties.envOrElse("SPARK_MASTER", s"spark://${InetAddress.getLocalHost.getHostAddress}:7077")
    log.debug(s"Spark master set to: $sparkMaster")

    // TODO: enable this functionality (need Spark 1.5 for this)
    //    val sparkArgs: Array[String] = config.getString("spark.submit.sparkargs").split(' ')

    if (!sparkMaster.startsWith("local[") && !sparkHome.isDefined)
      throw new RuntimeException("If 'SPARK_MASTER' is not set to local, 'SPARK_HOME' must be set.")

    val appName = mainClass.split('.').last

    val mainJar = jarPath.glob(mainJarPattern).collectFirst { case f ⇒ f.pathAsString }

    val configVars: Seq[(String, String)] = ConfigUtils.configFields(sparkConfig).toSeq

    val akkaRemoteConfigString = akkaRemoteConfig.map { config ⇒
      val configString = config.root().render(ConfigRenderOptions.concise())
      URLEncoder.encode(configString, "UTF-8")
    }

    val launcher = (new SparkLauncher)
      .setIfSome[String](mainJar) { (l, mj) ⇒ l.setAppResource(mj) }
      .setMainClass(mainClass)
      .setAppName(appName)
      .setMaster(sparkMaster)
      .setIfSome[String](sparkHome) { (l, sh) ⇒ l.setSparkHome(sh) }
      .addAppArgs("appName", appName)
      .addAppArgs("clientAkkaAddress", clientAkkaAddress)
      .setIfSome(akkaRemoteConfigString) { (l, config) ⇒ l.addAppArgs("remoteAkkaConfig", config) }
      .setFoldLeft(configVars) { case (launcher, (key, value)) ⇒ launcher.setConf(key, value) }
      .setDeployMode(sparkConfig.getString("spark.deploymode"))

    val extraJarFiles = jarPath.glob("*.jar")
      .map { case f ⇒ f.pathAsString }
      .filterNot(_.contains("/akka-"))

    val launcherWithJars =
      if (sendJars)
        extraJarFiles.foldLeft(launcher) { case (l, jarFile) ⇒ l.addJar(jarFile) }
      else if (extraJarFiles.length == 0) launcher
      else launcher
        .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*")
        .setConf(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*")

    startProcess(launcherWithJars)
  }

} 
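The sparkConfig argument is an ordinary Typesafe Config: launch reads spark.deploymode directly and forwards every other key/value pair to SparkLauncher.setConf. A hedged sketch of such a config follows; every key except spark.deploymode is illustrative.

import com.typesafe.config.ConfigFactory

object SparkConfigSketch extends App {
  val sparkConfig = ConfigFactory.parseString(
    """spark.deploymode = client
      |spark.executor.memory = 2g
      |""".stripMargin)

  println(sparkConfig.getString("spark.deploymode"))      // client
  println(sparkConfig.getString("spark.executor.memory")) // 2g
}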
Example 3
Source File: EmailSenderConfig.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.mail

import java.util.Properties
import javax.mail.{Authenticator, PasswordAuthentication, Session}

import com.typesafe.config.{Config, ConfigFactory}

case class EmailSenderAuthorizationConfig(
    user: String,
    password: String)

case class EmailSenderConfig(
    smtpHost: String,
    smtpPort: Int,
    from: String,
    authorizationConfig: Option[EmailSenderAuthorizationConfig]) {

  val sessionProperties: Properties = {
    val res = new Properties()
    res.put("mail.smtp.host", smtpHost)
    res.put("mail.smtp.port", smtpPort.toString)
    res.put("mail.from", from)
    res
  }

  private def mailAuthenticator: Option[Authenticator] = authorizationConfig.map { auth =>
    new Authenticator {
      override def getPasswordAuthentication: PasswordAuthentication = {
        new PasswordAuthentication(auth.user, auth.password)
      }
    }
  }

  def session: Session = mailAuthenticator.map { authenticator =>
    Session.getInstance(sessionProperties, authenticator)
  }.getOrElse(Session.getInstance(sessionProperties))
}

object EmailSenderConfig {
  def apply(config: Config): EmailSenderConfig = {
    val smtpHost = config.getString(smtpHostKey)
    val smtpPort = config.getInt(smtpPortKey)
    val from = config.getString(fromKey)
    val auth = EmailSenderAuthorizationConfig(config)
    EmailSenderConfig(
      smtpHost = smtpHost,
      smtpPort = smtpPort,
      from = from,
      authorizationConfig = auth)
  }
  def apply(): EmailSenderConfig = EmailSenderConfig(ConfigFactory.load().getConfig(emailSenderKey))

  val emailSenderKey = "email-sender"

  val smtpHostKey = "smtp.host"
  val smtpPortKey = "smtp.port"
  val fromKey = "from"
}

object EmailSenderAuthorizationConfig {
  def apply(config: Config): Option[EmailSenderAuthorizationConfig] = {
    if (config.hasPath(user) && config.hasPath(password)) {
      Some(EmailSenderAuthorizationConfig(
        user = config.getString(user),
        password = config.getString(password)
      ))
    } else {
      None
    }
  }

  val user = "user"
  val password = "pass"
} 
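The companion object above expects smtp.host, smtp.port and from, plus the optional user/pass keys, under the email-sender path of the loaded config. A minimal sketch that builds such a config in code (host, addresses and credentials are placeholders) and feeds it to EmailSenderConfig(config):

import com.typesafe.config.ConfigFactory
import io.deepsense.commons.mail.EmailSenderConfig

object EmailSenderConfigDemo extends App {
  val config = ConfigFactory.parseString(
    """smtp.host = "smtp.example.com"
      |smtp.port = 587
      |from = "noreply@example.com"
      |user = "mailer"
      |pass = "secret"
      |""".stripMargin)

  val senderConfig = EmailSenderConfig(config)
  println(senderConfig.sessionProperties)   // mail.smtp.host, mail.smtp.port, mail.from
  println(senderConfig.authorizationConfig) // Some(...), because both user and pass are present
}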
Example 4
Source File: AutoPartitionConsumer.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumer {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumer(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
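The Config handed to apply only needs to supply the Kafka client properties that .withConf(config) should merge in, typically just bootstrap.servers; the group id, deserializers and offset reset are already fixed inside apply. A minimal runner sketch (the object name and broker address are illustrative):

import com.typesafe.config.ConfigFactory

object AutoPartitionConsumerBoot extends App {
  // Only bootstrap.servers is supplied here; everything else comes from AutoPartitionConsumer.apply.
  AutoPartitionConsumer(ConfigFactory.parseString("""bootstrap.servers = "localhost:9092""""))
}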
Example 5
Source File: ConsumerToProducer.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka._
import cakesolutions.kafka.{KafkaConsumer, KafkaProducer}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}

import scala.concurrent.duration._


object ConsumerToProducer {

  def apply(consumerConfig: Config, producerConfig: Config): ActorRef = {

    // Create KafkaConsumerActor config with bootstrap.servers specified in Typesafe config
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(consumerConfig)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds, 5)

    // Create KafkaProducerActor config with defaults and bootstrap.servers specified in Typesafe config
    val producerConf = KafkaProducer.Conf(new StringSerializer, new StringSerializer).withConf(producerConfig)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerToProducer(consumerConf, actorConf, producerConf)))
  }
}

class ConsumerToProducer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf,
  producerConf: KafkaProducer.Conf[String, String]) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  // The KafkaConsumerActor
  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  // The KafkaProducerActor
  private val producer = context.actorOf(KafkaProducerActor.props(producerConf))

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)

    // Confirmed Offsets from KafkaProducer
    case o: Offsets =>
      consumer ! Confirm(o, commit = true)
  }

  // Demonstrates some transformation of the messages before forwarding to KafkaProducer
  private def processRecords(records: ConsumerRecords[String, String]) = {
    val transformedRecords = records.pairs.map { case (key, value) =>
      (key, value + ".")
    }

    // Send records to Topic2.  Offsets will be sent back to this actor once confirmed.
    producer ! ProducerRecords.fromKeyValues[String, String]("topic2", transformedRecords, Some(records.offsets), None)

    // Could have sent them like this if we didn't first transform:
    // producer ! ProducerRecords.fromConsumerRecords("topic2", records, None)
  }
} 
Example 6
Source File: ConsumerRecovery.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


class ConsumerRecovery(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10) {
    case _: KafkaConsumerActor.ConsumerException =>
      log.info("Consumer exception caught. Restarting consumer.")
      SupervisorStrategy.Restart
    case _ =>
      SupervisorStrategy.Escalate
  }

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
Example 7
Source File: AutoPartitionConsumerWithManualOffset.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor._
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor, Offsets}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumerWithManualOffset {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumerWithManualOffset(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumerWithManualOffset(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartitionWithManualOffset(List("topic1"), assignedListener, revokedListener)

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }

  private def assignedListener(tps: List[TopicPartition]): Offsets = {
    log.info("Partitions have been assigned" + tps.toString())

    // Should load the offsets from a persistent store and any related state
    val offsetMap = tps.map{ tp =>
      tp -> 0l
    }.toMap

    // Return the required offsets for the assigned partitions
    Offsets(offsetMap)
  }

  private def revokedListener(tps: List[TopicPartition]): Unit = {
    log.info("Partitions have been revoked" + tps.toString())
    // Opportunity to clear any state for the revoked partitions
    ()
  }
} 
Example 8
Source File: Postman.scala    From full-scala-stack   with Apache License 2.0
package mail

import com.typesafe.config.Config
import courier.{ Envelope, Mailer }
import zio.ZIO


trait CourierPostman extends Postman {
  val configKey: String
  val config: Config

  override val postman: Postman.Service[Any] = new Postman.Service[Any] {
    lazy val mailer: Mailer = {
      val localhost = config.getString(s"$configKey.smtp.localhost")
      System.setProperty("mail.smtp.localhost", localhost)
      System.setProperty("mail.smtp.localaddress", localhost)
      val auth = config.getBoolean(s"$configKey.smtp.auth")
      if (auth)
        Mailer(config.getString(s"$configKey.smtp.host"), config.getInt(s"$configKey.smtp.port"))
          .auth(auth)
          .as(
            config.getString(s"$configKey.smtp.user"),
            config.getString(s"$configKey.smtp.password")
          )
          .startTls(config.getBoolean(s"$configKey.smtp.startTTLS"))()
      else
        Mailer(config.getString(s"$configKey.smtp.host"), config.getInt(s"$configKey.smtp.port"))
          .auth(auth)()
    }

    override def deliver(email: Envelope): ZIO[Any, Throwable, Unit] =
      ZIO.fromFuture(implicit ec => mailer(email))
  }
} 
Example 9
Source File: HedgeFund.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund

import com.phasmid.hedge_fund.model._
import com.phasmid.hedge_fund.actors._
import com.phasmid.hedge_fund.portfolio.{Portfolio,PortfolioParser}
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import com.typesafe.config.{ ConfigFactory, Config }
import scala.io.Source
import scala.concurrent.ExecutionContext.Implicits.global


object HedgeFund extends App {

    val config = ConfigFactory.load()
    implicit val system = ActorSystem("HedgeFund")
    println(s"""${config.getString("name")}, ${config.getString("appVersion")}""")
    val engines: Seq[Query] = config.getString("engine") match {
      case "YQL" => Seq(YQLQuery(config.getString("format"), false))
      case "Google" => Seq(GoogleQuery("NASDAQ"))
      case "YQL,Google" => Seq(YQLQuery(config.getString("format"), false),GoogleQuery("NASDAQ"))
      case _ => Seq()
    }
    println(s"engines: $engines")
    val portfolio = getPortfolio(config)
    val blackboard = system.actorOf(Props.create(classOf[HedgeFundBlackboard]), "blackboard")
    val symbols = getSymbols(config,portfolio)
    for (engine <- engines) blackboard ! ExternalLookup(engine.getProtocol, engine.createQuery(symbols))
    val optionEngine = new GoogleOptionQuery
    symbols foreach {
      s => blackboard ! ExternalLookup(optionEngine.getProtocol, optionEngine.createQuery(List(s)))
    }
    blackboard ! PortfolioUpdate(portfolio)

    import scala.language.postfixOps

    def getSymbols(config: Config, portfolio: Portfolio) = {
      // TODO add in the symbols from the portfolio
      config.getString("symbols") split ("\\,") toList
    }

    def getPortfolio(config: Config): Portfolio = {
      val json = Source.fromFile(config.getString("portfolio")).mkString
      val portfolio = PortfolioParser.decode(json)
      println(s"portfolio: $portfolio")
      portfolio
    }
} 
Example 10
Source File: Application.scala    From Scalaprof   with GNU General Public License v2.0
package controllers

import play.api._
import play.api.mvc._
import akka.actor.{ActorSystem, Props}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import edu.neu.coe.scala.numerics.Rational
import akka.actor.ActorRef
import com.typesafe.config.{ ConfigFactory, Config }
import actors._
import models._
import spire.math.Real

class Application extends Controller {
  
  val config = ConfigFactory.load()
  val which = config.getString("calculator")
  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)
  implicit val system = ActorSystem("RPN-Calculator")
  val setup = which match {
    case "rational" => Application.getSetupForRational
    case "double" => Application.getSetupForDouble
    case "spire" => Application.getSetupForSpire
    case _ => Console.err.println(s"Unsupported calculator type: $which"); Application.getSetupForRational
  }
  val calculator = system.actorOf(setup._1, setup._2)
  val name: String = setup._3
  println(s"$name is ready")

  def index() = Action.async {
    val xsf = (calculator ? View).mapTo[Seq[_]]
    xsf map {
      case xs => Ok(s"$name: calculator has the following elements (starting with top): $xs")
    }
  }

  def command(s: String) = Action.async {
    val xtf = (calculator ? s).mapTo[Try[_]] 
    xtf map {
      case Success(x) => Ok(s"""$name: you have entered "$s" and got back $x""")
      case Failure(e) => if (s=="clr") Ok(s"$name: cleared") else Ok(s"""$name: you entered "$s" which caused error: $e""")
//      case Failure(e) => if (s=="clr") redirect("/") else  Ok(s"""$name: you entered "$s" which caused error: $e""")
    }
  }
}

object Application {
  // TODO move these to model classes
  def getSetupForDouble(implicit system: ActorSystem) = {
    implicit val lookup: String => Option[Double] = DoubleMill.constants.get _
    implicit val conv: String => Try[Double] = DoubleMill.valueOf _
    implicit val parser = new ExpressionParser[Double](conv, lookup)
    val mill = DoubleMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    // TODO for these methods, return mill and parser instead of props
    (props, "doubleCalculator", "Double Calculator")
  }

  // CONSIDER This assumes that we have Rational in our classpath already.
  // I'd like to try the possibility of dynamically loading the Rational stuff.
  // But, that's going to be very tricky, so we'll leave it for now.
  def getSetupForRational(implicit system: ActorSystem) = {
    implicit val lookup: String => Option[Rational] = RationalMill.constants.get _
    implicit val conv: String => Try[Rational] = RationalMill.valueOf _
    implicit val parser = new ExpressionParser[Rational](conv, lookup)
    val mill = RationalMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    (props, "rationalCalculator", "Rational Calculator")
  }

  // CONSIDER This assumes that we have Spire in our classpath already.
  def getSetupForSpire(implicit system: ActorSystem) = {
    import spire.implicits._
    import spire.math._
    implicit val lookup: String => Option[Real] = SpireMill.constants.get _
    implicit val conv: String => Try[Real] = SpireMill.valueOf _
    implicit val parser = new ExpressionParser[Real](conv, lookup)
    val mill = SpireMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    (props, "spireCalculator", "Spire Calculator")
  }
} 
Example 11
Source File: CalculatorSpec.scala    From Scalaprof   with GNU General Public License v2.0
package actors

import org.scalatest._
import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import scala.util._
import scala.io.Source
import scala.concurrent._
import scala.concurrent.duration._
import com.typesafe.config.{ ConfigFactory, Config }
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import akka.util.Timeout
import java.net.URL
import org.scalatest.concurrent._
import org.scalatest._
import org.scalatest.time._
import edu.neu.coe.scala.numerics.Rational
import models._


class CalculatorSpec extends FlatSpec with Matchers with Futures with ScalaFutures with Inside {
  implicit val system = ActorSystem("CountWords")  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)

  "Rational Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Rational]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  it should "yield 1 for 1" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  it should "yield 1 when given floating point problem" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "0.2 0.1 + 10 * 3 /").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  "Double Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Double]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  
  // This test suffers from a very peculiar bug which might even be a bug
  // in the Scala compiler. Kudos to you if you can fix it!!
  ignore should "yield 1 for 1" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Double]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(1.0) => }
  }
} 
Example 12
Source File: SslContextModule.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.ssl

import cats.effect.Sync
import cats.syntax.functor._
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.sslconfig.ssl.{
  ConfigSSLContextBuilder,
  DefaultKeyManagerFactoryWrapper,
  DefaultTrustManagerFactoryWrapper,
  SSLConfigFactory
}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

object SslContextModule {

  private val SslContextEnabledKey = "enabled"

  
  def makeIfEnabled[F[_]: Sync](config: Config, withReference: Boolean = true): F[Option[SSLContext]] = {
    if (config.hasPath(SslContextEnabledKey) && config.getBoolean(SslContextEnabledKey)) {
      make(config, withReference).map(Some(_))
    } else {
      Sync[F].delay(None)
    }

  }

  private def referenceConfigUnsafe(): Config = ConfigFactory.defaultReference().getConfig("ssl-config")

} 
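The make method that actually builds the SSLContext is not part of this excerpt, but makeIfEnabled can still be exercised with a disabled config, in which case it short-circuits to None. A sketch assuming a cats-effect 2 style unsafeRunSync:

import cats.effect.IO
import com.typesafe.config.ConfigFactory
import com.avast.sst.ssl.SslContextModule

object SslContextModuleDemo extends App {
  // "enabled = false" (or a missing "enabled" key) means no SSLContext is constructed.
  val disabled = SslContextModule.makeIfEnabled[IO](ConfigFactory.parseString("enabled = false"))
  println(disabled.unsafeRunSync()) // None
}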
Example 13
Source File: BasicAuthentication.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.http_frontend.authentication
import java.security.MessageDigest

import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import com.typesafe.config.Config

class BasicAuthentication(config: Config) extends HttpAuthentication {
  private val username = config.getString("username")
  private val password = config.getString("password")

  override def authentication(request: HttpRequest): AuthenticationResult = {
    request.header[`Authorization`] match {
      case Some(header) =>
        val receivedCredentials = BasicHttpCredentials(header.credentials.token())

        val authenticated = MessageDigest.isEqual(receivedCredentials.username.getBytes, username.getBytes) &&
          MessageDigest.isEqual(receivedCredentials.password.getBytes, password.getBytes)
        if (authenticated) AuthenticationSucceeded(AuthenticationInfo(Some(s"Logged in as: $username"), None, Seq.empty))
        else AuthenticationForbidden(HttpResponse(403))
      case None =>
        val header = `WWW-Authenticate`(List(HttpChallenge("basic", Some("SumoBot"))))
        AuthenticationForbidden(HttpResponse(401, headers = List(header)))
    }
  }
} 
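Wiring the class up only requires a config with username and password; a request without an Authorization header is then rejected with a 401 challenge. A small sketch with placeholder credentials:

import akka.http.scaladsl.model.HttpRequest
import com.typesafe.config.ConfigFactory
import com.sumologic.sumobot.http_frontend.authentication.BasicAuthentication

object BasicAuthenticationDemo extends App {
  val auth = new BasicAuthentication(ConfigFactory.parseString(
    """username = "admin"
      |password = "hunter2"
      |""".stripMargin))

  // No Authorization header is present, so this yields AuthenticationForbidden with a 401 response.
  println(auth.authentication(HttpRequest()))
}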
Example 14
Source File: SumoBotHttpServerOptions.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.http_frontend

import com.sumologic.sumobot.http_frontend.authentication.{HttpAuthentication, Link, NoAuthentication}
import com.typesafe.config.{Config, ConfigFactory}

import scala.collection.JavaConverters._
import scala.util.{Success, Try}

case class SumoBotHttpServerOptions(httpHost: String, httpPort: Int,
                                    origin: String, authentication: HttpAuthentication,
                                    title: String, description: Option[String],
                                    links: Seq[Link])

object SumoBotHttpServerOptions {
  val DefaultOrigin = "*"
  val DefaultAuthentication = new NoAuthentication(ConfigFactory.empty())
  val DefaultTitle = "Sumobot-over-HTTP"

  def fromConfig(config: Config): SumoBotHttpServerOptions = {
    val httpHost = config.getString("host")
    val httpPort = config.getInt("port")
    val origin = if (config.hasPath("origin")) {
      config.getString("origin")
    } else DefaultOrigin

    val authentication = authenticationFromConfig(config)

    val title = if (config.hasPath("title")) {
      config.getString("title")
    } else DefaultTitle

    val description = if (config.hasPath("description")) {
      Some(config.getString("description"))
    } else None

    val links = linksFromConfig(config)

    SumoBotHttpServerOptions(httpHost, httpPort, origin,
      authentication, title, description, links)
  }

  private def authenticationFromConfig(config: Config): HttpAuthentication = {
    Try(config.getConfig("authentication")) match {
      case Success(authenticationConfig)
      if authenticationConfig.getBoolean("enabled") =>
        val clazz = Class.forName(authenticationConfig.getString("class"))
        val constructor = clazz.getConstructor(classOf[Config])
        constructor.newInstance(authenticationConfig).asInstanceOf[HttpAuthentication]

      case _ => DefaultAuthentication
    }
  }

  private def linksFromConfig(config: Config): Seq[Link] = {
    Try(config.getObject("links").asScala) match {
      case Success(links) =>
        links.map {
          case (link, _) =>
            val linkName = config.getString(s"links.$link.name")
            val linkHref = config.getString(s"links.$link.href")
            Link(linkName, linkHref)
        }.toSeq
      case _ => Seq.empty
    }
  }
} 
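Only host and port are mandatory; every other field falls back to the defaults defined at the top of the object. A minimal sketch (host, port and title are placeholders):

import com.typesafe.config.ConfigFactory
import com.sumologic.sumobot.http_frontend.SumoBotHttpServerOptions

object HttpServerOptionsDemo extends App {
  val options = SumoBotHttpServerOptions.fromConfig(ConfigFactory.parseString(
    """host = "0.0.0.0"
      |port = 8080
      |title = "My Bot"
      |""".stripMargin))

  println(options.origin)         // "*", i.e. DefaultOrigin, since no origin key was given
  println(options.authentication) // a NoAuthentication instance, since no authentication block was given
}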
Example 15
Source File: AWSAccounts.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.core.aws

import com.amazonaws.auth.{AWSCredentials, BasicAWSCredentials}
import com.sumologic.sumobot.core.config.ListOfConfigs
import com.typesafe.config.Config

object AWSAccounts {
  def load(config: Config): Map[String, AWSCredentials] = {
    ListOfConfigs.parse(config, "aws") {
      (name, accountConfig) =>
        val key = accountConfig.getString(s"key.id")
        val secret = accountConfig.getString(s"key.secret")
        new BasicAWSCredentials(key, secret)
    }
  }
} 
Example 16
Source File: ListOfConfigs.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.core.config

import com.typesafe.config.{Config, ConfigException}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

object ListOfConfigs {
  def parse[T](config: Config, path: String)(convert: (String, Config) => T): Map[String, T] = {
    Try(config.getObject(path).asScala) match {
      case Success(accounts) =>
        accounts.map {
          obj =>
            val name = obj._1
            name -> convert(name, config.getConfig(path + "." + name))
        }.toMap
      case Failure(e: ConfigException.Missing) =>
        Map.empty
      case Failure(other) =>
        throw other
    }
  }
} 
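AWSAccounts in the previous example drives this with an "aws" block containing one nested config per account name. A sketch of that shape, with placeholder key material, extracting a single field per entry:

import com.typesafe.config.ConfigFactory
import com.sumologic.sumobot.core.config.ListOfConfigs

object ListOfConfigsDemo extends App {
  val config = ConfigFactory.parseString(
    """aws {
      |  prod    { key.id = "AKIA-EXAMPLE-PROD", key.secret = "s1" }
      |  staging { key.id = "AKIA-EXAMPLE-STG",  key.secret = "s2" }
      |}""".stripMargin)

  // One entry per named sub-config under "aws"; a missing "aws" path would yield an empty map.
  val ids: Map[String, String] = ListOfConfigs.parse(config, "aws") { (name, accountConfig) =>
    accountConfig.getString("key.id")
  }

  println(ids) // Map(prod -> AKIA-EXAMPLE-PROD, staging -> AKIA-EXAMPLE-STG)
}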
Example 17
Source File: JenkinsConfiguration.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.plugins.jenkins

import com.typesafe.config.Config

import scala.util.Try

case class JenkinsConfiguration(url: String,
                                username: String,
                                password: String,
                                buildToken: Option[String])

object JenkinsConfiguration {
  def load(config: Config): JenkinsConfiguration = {
    val url = config.getString("url")
    val username = config.getString("username")
    val password = config.getString("password")
    val buildToken = Try(config.getString("build.token")).toOption
    JenkinsConfiguration(url, username, password, buildToken)
  }
} 
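A sketch of the config that load expects; build.token is optional because it is read through Try(...).toOption. The URL and credentials are placeholders.

import com.typesafe.config.ConfigFactory
import com.sumologic.sumobot.plugins.jenkins.JenkinsConfiguration

object JenkinsConfigurationDemo extends App {
  val jenkins = JenkinsConfiguration.load(ConfigFactory.parseString(
    """url = "https://jenkins.example.com"
      |username = "bot"
      |password = "secret"
      |""".stripMargin))

  println(jenkins.buildToken) // None, because "build.token" is not set
}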
Example 18
Source File: ExperimentVariantEventDynamoServiceTest.scala    From izanami   with Apache License 2.0
package specs.dynamo.abtesting

import cats.effect.IO
import akka.stream.alpakka.dynamodb.{DynamoClient => AlpakkaClient}
import com.typesafe.config.{Config, ConfigFactory}
import domains.abtesting.events.impl.ExperimentVariantEventDynamoService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import domains.events.impl.BasicEventStore
import env.DynamoConfig
import libs.logs.ZLogger
import store.dynamo.DynamoClient

class ExperimentVariantEventDynamoServiceTest extends AbstractExperimentServiceTest("DynamoDb") {

  private val region    = "eu-west-1"
  private val host      = "127.0.0.1"
  private val port      = 8001
  private val accessKey = "dummy-access-key"
  private val secretKey = "dummy-secretKey-key"

  override def akkaConfig: Option[Config] = Some(ConfigFactory.parseString(s"""
      |akka.stream.alpakka.dynamodb {
      |  region = "eu-west-1"
      |  host = $host
      |  port = $port
      |  tls = false
      |  parallelism = 32
      |  credentials {
      |    access-key-id = $accessKey
      |    secretKey-key-id = $secretKey
      |  }
      |}
   """.stripMargin))

  def getClient(config: DynamoConfig): AlpakkaClient = {
    val Some(client) = runtime.unsafeRun(DynamoClient.dynamoClient(Some(config)).provideLayer(ZLogger.live))
    client
  }

  override def dataStore(name: String): ExperimentVariantEventService.Service = {
    val config = DynamoConfig("othername",
                              name,
                              region,
                              host,
                              port,
                              tls = false,
                              accessKey = Some(accessKey),
                              secretKey = Some(secretKey))
    ExperimentVariantEventDynamoService(config, getClient(config))
  }
} 
Example 19
Source File: BasicAuthenticationExtension.scala    From scruid   with Apache License 2.0
package ing.wbaa.druid.auth.basic

import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.headers.{ Authorization, BasicHttpCredentials }
import com.typesafe.config.{ Config, ConfigFactory, ConfigValueFactory }
import com.typesafe.scalalogging.LazyLogging
import ing.wbaa.druid.client.{
  NoopRequestInterceptor,
  RequestInterceptor,
  RequestInterceptorBuilder
}


class BasicAuthenticationExtension(username: String, password: String)
    extends NoopRequestInterceptor {
  override def interceptRequest(request: HttpRequest): HttpRequest =
    request.withHeaders(Authorization(BasicHttpCredentials(username, password)))

  override def exportConfig: Config =
    ConfigFactory
      .empty()
      .withValue("username", ConfigValueFactory.fromAnyRef(username))
      .withValue("password", ConfigValueFactory.fromAnyRef(password))
}

object BasicAuthenticationExtension extends RequestInterceptorBuilder with LazyLogging {

  override def apply(config: Config): RequestInterceptor = {

    val username =
      Option(config.getString("username")).getOrElse {
        throw new IllegalStateException(
          "BasicAuthenticationExtension requires 'username' configuration parameter to be specified"
        )
      }

    val password =
      Option(config.getString("password")).getOrElse {
        throw new IllegalStateException(
          "BasicAuthenticationExtension requires 'password' configuration parameter to be specified"
        )
      }

    logger.info(s"BasicAuthenticationExtension[username=$username] created")
    new BasicAuthenticationExtension(username, password)
  }
} 
Example 20
Source File: DefaultModule.scala    From coral   with Apache License 2.0
package io.coral.api

import com.typesafe.config.Config
import io.coral.actors.ActorPropFactory
import scaldi.Module
import scala.collection.JavaConversions._
import io.coral.actors.DefaultActorPropFactory

class DefaultModule(config: Config) extends Module {
	private val ActorPropFactoriesConfigPath = "injections.actorPropFactories"
	bind[List[ActorPropFactory]] to createActorPropFactories

	private def createActorPropFactories: List[ActorPropFactory] = {
		getActorPropFactoryClassNames.map(Class.forName(_).newInstance.asInstanceOf[ActorPropFactory])
	}

	private def getActorPropFactoryClassNames: List[String] = {
		val additionalClassNames = if (config.hasPath(ActorPropFactoriesConfigPath)) {
			(config getStringList ActorPropFactoriesConfigPath).toList
		} else {
			List()
		}

		classOf[DefaultActorPropFactory].getName :: additionalClassNames
	}
} 
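The only key the module looks at is injections.actorPropFactories, a list of fully-qualified class names; DefaultActorPropFactory is always prepended regardless of the config. A sketch with a hypothetical factory class name:

import com.typesafe.config.ConfigFactory
import io.coral.api.DefaultModule

object DefaultModuleDemo extends App {
  // "com.example.MyActorPropFactory" is hypothetical; it would need to implement
  // ActorPropFactory and be on the classpath when the binding is actually used.
  val config = ConfigFactory.parseString(
    """injections.actorPropFactories = ["com.example.MyActorPropFactory"]""")

  val module = new DefaultModule(config)
}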
Example 21
Source File: Executable.scala    From sparkplug   with MIT License
package springnz.sparkplug.core

import com.typesafe.config.Config

import scala.util.Try

trait Executable {
  def executor: Executor[Try[_]]
}

class LocalExecutable(
    appName: String,
    sparkMaster: Option[String] = None,
    config: Option[Config] = Some(LocalConfigurer.defaultConfig)) extends Executable {

  override def executor = new SparkExecutor {
    override val configurer: Configurer = new LocalConfigurer(appName, sparkMaster, config)
  }
} 
Example 22
Source File: Configurer.scala    From sparkplug   with MIT License
package springnz.sparkplug.core

import com.typesafe.config.{ Config, ConfigFactory }
import org.apache.spark.SparkConf
import springnz.sparkplug.util.ConfigUtils

trait Configurer {
  def apply[A](f: SparkConf ⇒ A): A = {
    val cfg = new SparkConf()
    f(cfg)
  }
}

trait MapConfigurer extends Configurer {
  def configMap: Map[String, String]

  override def apply[A](f: SparkConf ⇒ A): A = {
    val config = super.apply { cfg ⇒
      configMap.foldLeft(cfg) {
        case (configState, (key, value)) ⇒
          configState.set(key, value.toString)
      }
    }
    f(config)
  }

  override def toString = configMap.filterNot(_._1.contains("password")).mkString("\n")
}

class LocalConfigurer(
    applicationName: String,
    sparkMaster: Option[String] = None,
    configOption: Option[Config] = Some(LocalConfigurer.defaultConfig)) extends MapConfigurer {

  protected def appName = applicationName

  private val configFields = configOption.map(config ⇒ ConfigUtils.configFields(config)).getOrElse(Map.empty)

  override def configMap = (sparkMaster match {
    case Some(masterName) ⇒ configFields.updated("spark.master", masterName)
    case None             ⇒ configFields
  }) ++ Map("spark.app.name" -> appName)

}

object LocalConfigurer {
  def defaultConfig = ConfigFactory.load.getConfig("sparkplug.spark.conf")
} 
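LocalConfigurer.defaultConfig just picks the sparkplug.spark.conf block out of the loaded config; any Config with flat Spark keys can be passed instead. A hedged sketch follows (the key values are illustrative, and the printed form depends on ConfigUtils.configFields, which is not shown in this excerpt):

import com.typesafe.config.ConfigFactory
import springnz.sparkplug.core.LocalConfigurer

object LocalConfigurerDemo extends App {
  val sparkConf = ConfigFactory.parseString(
    """sparkplug.spark.conf {
      |  spark.executor.memory = 1g
      |  spark.serializer = "org.apache.spark.serializer.KryoSerializer"
      |}""".stripMargin).getConfig("sparkplug.spark.conf")

  val configurer = new LocalConfigurer("demo-app", Some("local[2]"), Some(sparkConf))

  // toString lists the resulting key/value pairs with any *password* keys filtered out.
  println(configurer)
}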
Example 23
Source File: ScalaKinesisProducer.scala    From kpl-scala   with Apache License 2.0
package com.contxt.kinesis

import com.amazonaws.services.kinesis.producer.{ KinesisProducer, KinesisProducerConfiguration, UserRecordResult }
import com.google.common.util.concurrent.ListenableFuture
import com.typesafe.config.{ Config, ConfigFactory }
import java.nio.ByteBuffer
import scala.concurrent._
import scala.language.implicitConversions
import scala.util.Try
import collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global


trait ScalaKinesisProducer {

  def streamId: StreamId

  // Sends a record to the stream; the future completes when the KPL confirms or fails the put.
  def send(partitionKey: String, data: ByteBuffer, explicitHashKey: Option[String]): Future[UserRecordResult]

  // Performs an orderly shutdown, flushing outstanding records before destroying the producer.
  def shutdown(): Future[Unit]
}

object ScalaKinesisProducer {
  def apply(
    streamName: String,
    kplConfig: KinesisProducerConfiguration,
    config: Config = ConfigFactory.load()
  ): ScalaKinesisProducer = {
    val producerStats = ProducerStats.getInstance(config)
    ScalaKinesisProducer(streamName, kplConfig, producerStats)
  }

  def apply(
    streamName: String,
    kplConfig: KinesisProducerConfiguration,
    producerStats: ProducerStats
  ): ScalaKinesisProducer = {
    val streamId = StreamId(kplConfig.getRegion, streamName)
    val producer = new KinesisProducer(kplConfig)
    new ScalaKinesisProducerImpl(streamId, producer, producerStats)
  }

  private[kinesis] implicit def listenableToScalaFuture[A](listenable: ListenableFuture[A]): Future[A] = {
    val promise = Promise[A]
    val callback = new Runnable {
      override def run(): Unit = promise.tryComplete(Try(listenable.get()))
    }
    listenable.addListener(callback, ExecutionContext.global)
    promise.future
  }
}

private[kinesis] class ScalaKinesisProducerImpl(
  val streamId: StreamId,
  private val producer: KinesisProducer,
  private val stats: ProducerStats
) extends ScalaKinesisProducer {
  import ScalaKinesisProducer.listenableToScalaFuture

  stats.reportInitialization(streamId)

  def send(partitionKey: String, data: ByteBuffer, explicitHashKey: Option[String]): Future[UserRecordResult] = {
    stats.trackSend(streamId, data.remaining) {
      producer.addUserRecord(streamId.streamName, partitionKey, explicitHashKey.orNull, data).map { result =>
        if (!result.isSuccessful) throwSendFailedException(result) else result
      }
    }
  }

  def shutdown(): Future[Unit] = shutdownOnce

  private lazy val shutdownOnce: Future[Unit] = {
    val allFlushedFuture = flushAll()
    val shutdownPromise = Promise[Unit]
    allFlushedFuture.onComplete { _ =>
      shutdownPromise.completeWith(destroyProducer())
    }
    val combinedFuture = allFlushedFuture.zip(shutdownPromise.future).map(_ => ())
    combinedFuture.onComplete(_ => stats.reportShutdown(streamId))
    combinedFuture
  }

  private def throwSendFailedException(result: UserRecordResult): Nothing = {
    val attemptCount = result.getAttempts.size
    val errorMessage = result.getAttempts.asScala.lastOption.map(_.getErrorMessage)
    throw new RuntimeException(
      s"Sending a record to $streamId failed after $attemptCount attempts, last error message: $errorMessage."
    )
  }

  private def flushAll(): Future[Unit] = {
    Future {
      blocking {
        producer.flushSync()
      }
    }
  }

  private def destroyProducer(): Future[Unit] = {
    Future {
      blocking {
        producer.destroy()
      }
    }
  }
} 
Example 24
Source File: ProducerStats.scala    From kpl-scala   with Apache License 2.0
package com.contxt.kinesis

import com.amazonaws.services.kinesis.producer.UserRecordResult
import com.typesafe.config.{ Config, ConfigFactory }
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.util.control.NonFatal

trait ProducerStats {
  def trackSend(streamId: StreamId, size: Int)(closure: => Future[UserRecordResult]): Future[UserRecordResult]
  def reportInitialization(streamId: StreamId): Unit
  def reportShutdown(streamId: StreamId): Unit
}

object ProducerStats {
  private val log = LoggerFactory.getLogger(classOf[ProducerStats])

  def getInstance(config: Config): ProducerStats = {
    try {
      val className = config.getString("com.contxt.kinesis.producer.stats-class-name")
      Class.forName(className).newInstance().asInstanceOf[ProducerStats]
    }
    catch {
      case NonFatal(e) =>
        log.error("Could not load a `ProducerStats` instance, falling back to `NoopProducerStats`.", e)
        new NoopProducerStats
    }
  }
}

class NoopProducerStats extends ProducerStats {
  def trackSend(streamId: StreamId, size: Int)(closure: => Future[UserRecordResult]): Future[UserRecordResult] = closure
  def reportInitialization(streamId: StreamId): Unit = {}
  def reportShutdown(streamId: StreamId): Unit = {}
} 
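getInstance reflectively loads whatever class name is configured and falls back to the no-op implementation on any failure. A sketch pointing the key at the built-in NoopProducerStats:

import com.typesafe.config.ConfigFactory
import com.contxt.kinesis.ProducerStats

object ProducerStatsDemo extends App {
  val config = ConfigFactory.parseString(
    """com.contxt.kinesis.producer.stats-class-name = "com.contxt.kinesis.NoopProducerStats"""")

  val stats = ProducerStats.getInstance(config)
  println(stats.getClass.getName) // com.contxt.kinesis.NoopProducerStats
}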
Example 25
Source File: Implicits.scala    From activemq-cli   with Apache License 2.0
package activemq.cli.util

import com.typesafe.config.Config
import java.util.Date
import java.util.Locale
import java.text.SimpleDateFormat
import javax.jms.Message
import javax.jms.TextMessage
import scala.collection.JavaConversions._

object Implicits {

  implicit class RichConfig(val underlying: Config) extends AnyVal {
    def getOptionalString(path: String): Option[String] = if (underlying.hasPath(path)) {
      Some(underlying.getString(path))
    } else {
      None
    }
  }

  
  implicit def optionStringToBoolean(o: Option[String]): Boolean = {
    !o.getOrElse("").isEmpty
  }

  implicit class MessageImprovements(val message: Message) {

    val prettyPrinter = new scala.xml.PrettyPrinter(100000, 2) //scalastyle:ignore

    def toXML(timestampFormat: Option[String] = None): String = {

      val addOptional = (condition: Boolean, xml: scala.xml.Elem) ⇒ if (condition) xml else scala.xml.NodeSeq.Empty

      prettyPrinter.format(<jms-message>
                             <header>
                               <message-id>{ message.getJMSMessageID }</message-id>
                               { addOptional(Option(message.getJMSCorrelationID).isDefined, <correlation-id>{ message.getJMSCorrelationID }</correlation-id>) }
                               <delivery-mode>{ message.getJMSDeliveryMode }</delivery-mode>
                               <destination>{ message.getJMSDestination }</destination>
                               <expiration>{ message.getJMSExpiration }</expiration>
                               <priority>{ message.getJMSPriority }</priority>
                               <redelivered>{ message.getJMSRedelivered }</redelivered>
                               { addOptional(Option(message.getJMSReplyTo).isDefined, <reply-to>{ message.getJMSReplyTo }</reply-to>) }
                               <timestamp>{
                                 timestampFormat match {
                                   case Some(matched)⇒ new SimpleDateFormat(matched).format(new Date(message.getJMSTimestamp))
                                   case _⇒ message.getJMSTimestamp
                                 }
                               }</timestamp>
                               { addOptional(Option(message.getJMSType).isDefined, <type>{ message.getJMSType }</type>) }
                             </header>
                             {
                               addOptional(message.getPropertyNames.hasMoreElements, <properties> {
                                 message.getPropertyNames.map(name ⇒
                                   <property><name>{ name }</name><value>{ message.getStringProperty(name.toString) }</value></property>)
                               } </properties>)
                             }
                             {
                               message match {
                                 case textMessage: TextMessage if Option(textMessage.getText).isDefined ⇒ addOptional(
                                   textMessage.getText,
                                   <body>{ scala.xml.PCData(textMessage.getText.replaceAll("]]>", "]]]]><![CDATA[>")) }</body>
                                 )
                                 case _⇒ scala.xml.NodeSeq.Empty
                               }
                             }
                           </jms-message>)
    }

    def textMatches(regex: String): Boolean = {
      if (regex) {
        message match {
          case textMessage: TextMessage ⇒ (regex.r findFirstIn textMessage.getText)
          case _                        ⇒ false
        }
      } else {
        true
      }
    }
  }
} 
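The RichConfig wrapper turns a missing path into None instead of the ConfigException.Missing that getString would throw. A quick usage sketch (the broker.* keys are illustrative):

import com.typesafe.config.ConfigFactory
import activemq.cli.util.Implicits._

object RichConfigDemo extends App {
  val config = ConfigFactory.parseString("""broker.host = "localhost"""")

  println(config.getOptionalString("broker.host")) // Some(localhost)
  println(config.getOptionalString("broker.port")) // None, no exception is thrown
}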
Example 26
Source File: ConverterFactorySpec.scala    From cave   with MIT License
package worker.converter

import com.typesafe.config.Config
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.mockito.MockitoSugar
import org.mockito.Mockito._
import scala.collection.JavaConverters._

class ConverterFactorySpec extends FlatSpec with MockitoSugar with Matchers {

  val mockConfig = mock[Config]

  "A converter factory" should "build from configuration" in {
    when(mockConfig.getStringList("list")).thenReturn(List("worker.converter.PagerDutyConverter").asJava)
    when(mockConfig.getString("default")).thenReturn("worker.converter.JsonConverter")

    val converterFactory = new ConverterFactory(mockConfig)

    converterFactory.converters.toList.size should be(1)
    converterFactory.converters.head.isInstanceOf[PagerDutyConverter] should be(true)
    converterFactory.default.isInstanceOf[JsonConverter] should be(true)
  }
} 
Example 27
Source File: AwsConfig.scala    From cave   with MIT License
package com.cave.metrics.data

import com.amazonaws.auth.{AWSCredentials, AWSCredentialsProvider, BasicAWSCredentials, ClasspathPropertiesFileCredentialsProvider}
import com.typesafe.config.Config

trait AmazonWebServiceConfig {
  def endpoint: String
  def service: String
  def region: String
}

class AwsConfig(config: Config) {

  private lazy val awsConfig = config.resolve.getConfig("aws")
  private lazy val rdsConfig = awsConfig.resolve.getConfig("rds")

  private lazy val awsCredentialsConfig = awsConfig.getConfig("credentials")
  lazy val awsCredentialsProvider = createAwsCredentialsProvider(
    awsCredentialsConfig.getString("access-key"),
    awsCredentialsConfig.getString("secret-key"))

  println("AWS Access Key: " + awsCredentialsProvider.getCredentials.getAWSAccessKeyId)

  private lazy val kinesisConfig = awsConfig.getConfig("kinesis")
  lazy val awsKinesisConfig = makeAmazonWebServiceConfig(kinesisConfig)

  private lazy val awsKinesisStreamConfig = kinesisConfig.getConfig("stream")
  lazy val rawStreamName = awsKinesisStreamConfig.getString("raw")
  lazy val processedStreamName = awsKinesisStreamConfig.getString("processed")

  private lazy val sqsConfig = awsConfig.getConfig("sqs")
  lazy val awsSQSConfig = makeAmazonWebServiceConfig(sqsConfig)

  lazy val longPollTimeInSeconds = sqsConfig.getInt("longPollTimeInSeconds")

  private lazy val awsSqsQueuesConfig = sqsConfig.getConfig("queues")
  lazy val configurationChangesQueueName = awsSqsQueuesConfig.getString("config-changes")
  lazy val alarmScheduleQueueName = awsSqsQueuesConfig.getString("alarm-schedule")

  private lazy val autoScalingConfig = awsConfig.getConfig("autoscaling")
  lazy val awsAutoScalingConfig = makeAmazonWebServiceConfig(autoScalingConfig)

  private lazy val ec2Config = awsConfig.getConfig("ec2")
  lazy val awsEC2Config = makeAmazonWebServiceConfig(ec2Config)

  private lazy val snsConfig = awsConfig.getConfig("sns")
  lazy val awsSNSConfig = makeAmazonWebServiceConfig(snsConfig)

  private lazy val awsSnsTopicsConfig = snsConfig.getConfig("topics")
  lazy val configurationChangesTopicName = awsSnsTopicsConfig.getString("config-changes")

  lazy val rdsJdbcDatabaseClass = rdsConfig.getString("database-class")
  lazy val rdsJdbcDatabaseUrl = rdsConfig.getString("database-jdbc")
  lazy val rdsJdbcDatabaseServer = rdsConfig.getString("database-server")
  lazy val rdsJdbcDatabasePort = rdsConfig.getString("database-port")
  lazy val rdsJdbcDatabaseName = rdsConfig.getString("database-name")
  lazy val rdsJdbcDatabaseUser = rdsConfig.getString("database-user")
  lazy val rdsJdbcDatabasePassword = rdsConfig.getString("database-password")
  lazy val rdsJdbcDatabasePoolSize = rdsConfig.getInt("pool-size")
  lazy val rdsJdbcConnectionTimeout = rdsConfig.getInt("connection-timeout")

  lazy val leadershipTermTimeoutSeconds = config.getInt("leadershipTermTimeoutSeconds")
  lazy val leadershipTermLengthSeconds = config.getInt("leadershipTermLengthSeconds")

  
  private[this] def makeAmazonWebServiceConfig(config: Config) = new AmazonWebServiceConfig {
      override def endpoint: String = config.getString("endpoint")
      override def service: String = config.getString("service")
      override def region: String = config.getString("region")
    }
} 
Example 28
Source File: InfluxClientFactory.scala    From cave   with MIT License
package com.cave.metrics.data.influxdb

import java.util.concurrent.Executors

import com.cave.metrics.data.Metric
import com.typesafe.config.Config
import org.joda.time.{DateTimeZone, DateTime}
import collection.JavaConversions._
import scala.concurrent.ExecutionContext

case class InfluxConfiguration(default: InfluxClusterConfig, alternates: Map[String, InfluxClusterConfig]) {

  val alts = alternates.map { case (name, config) => s"Name: $name, Config: $config"}
  println(s"Default: $default, Alters: $alts")
}

object InfluxConfiguration {

  def apply(config: Config) = {

    val default = InfluxClusterConfig(config.getString("url"), config.getString("user"), config.getString("pass"))

    val alternates = config.getConfigList("alternates") map { conf =>
      conf.getString("name") -> InfluxClusterConfig(conf.getString("url"), default.user, default.pass)
    }

    new InfluxConfiguration(default, alternates.toMap)
  }
}

class InfluxClientFactory(config: InfluxConfiguration) {

  def createClient(clusterConfig: InfluxClusterConfig): (InfluxClient, ExecutionContext) =
    new InfluxClient(clusterConfig) -> ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())

  val defaultClient = createClient(config.default)
  val alternates = config.alternates map { case (name, clusterConfig) => name -> createClient(clusterConfig)}

  def getClient(name: Option[String]): (InfluxClient, ExecutionContext) = name match {
    case None => defaultClient
    case Some(clusterName) => alternates.getOrElse(clusterName, default = defaultClient)
  }

  def sendMetrics(metrics: Seq[Metric]): Unit = {

    val now = new DateTime().withZone(DateTimeZone.UTC).getMillis / 1000
    val maxDelay = metrics.foldLeft(0L) { case (delay, metric) =>
        Math.max(delay, Math.abs(metric.timestamp - now))
    }
    val (defaultClient, defaultContext) = getClient(None)
    defaultClient.putMetricData(Seq(
      Metric("writer-delay", now, maxDelay, Map(Metric.Organization -> Metric.Internal))
    ))(defaultContext)

    metrics.groupBy(_.tags.get(Metric.Cluster)) map { case (cluster, metricSeq) =>
      val (client, context) = getClient(cluster)
      client.putMetricData(metricSeq)(context)
    }
  }

  def close(): Unit = {
    defaultClient._1.close()
    alternates foreach { case (_, (client, _)) => client.close() }
  }
} 
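The factory above only touches a handful of keys, so the configuration shape it expects is easy to reconstruct. Below is a minimal, self-contained sketch that parses a config with the same paths InfluxConfiguration.apply reads; the URLs and credentials are made-up placeholders, not values from the cave project.

import com.typesafe.config.ConfigFactory
import collection.JavaConversions._

object InfluxConfigShapeDemo extends App {
  // Placeholder values; only the key names mirror what InfluxConfiguration.apply accesses.
  val config = ConfigFactory.parseString(
    """
      |url  = "http://influx-main:8086"
      |user = "cave"
      |pass = "secret"
      |alternates = [
      |  { name = "metrics", url = "http://influx-alt:8086" }
      |]
    """.stripMargin)

  val default    = (config.getString("url"), config.getString("user"), config.getString("pass"))
  val alternates = config.getConfigList("alternates") map { c => c.getString("name") -> c.getString("url") }

  println(default)
  println(alternates.toMap)
}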
Example 29
Source File: ConsumerSelfManaged.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor, Offsets}
import com.typesafe.config.Config
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


// Note: the enclosing object declaration was stripped in this listing; it is restored here (without its original doc comment) so the factory method compiles.
object ConsumerSelfManaged {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "groupId",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerSelfManaged(consumerConf, actorConf)))
  }
}

class ConsumerSelfManaged(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.ManualOffset(Offsets(Map((new TopicPartition("topic1", 0), 1))))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: ConsumerRecords[String, String]) = {
    records.pairs.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
    log.info(s"Batch complete, offsets: ${records.offsets}")
  }
} 
Example 30
Source File: SqlDatabase.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package util

import java.net.URI

import com.typesafe.config.ConfigValueFactory._
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.StrictLogging
import org.flywaydb.core.Flyway
import slick.driver.JdbcProfile
import slick.jdbc.JdbcBackend._

case class SqlDatabase(
    db: slick.jdbc.JdbcBackend#Database,
    driver: JdbcProfile,
    connectionString: JdbcConnectionString
) {
  def updateSchema() {
    val flyway = new Flyway()
    flyway.setDataSource(connectionString.url, connectionString.username, connectionString.password)
    flyway.migrate()
  }

  def close() {
    db.close()
  }
}

case class JdbcConnectionString(url: String, username: String = "", password: String = "")

object SqlDatabase extends StrictLogging {

  def create(config: DatabaseConfig): SqlDatabase = {
    val envDatabaseUrl = System.getenv("DATABASE_URL")

    if (config.dbPostgresServerName.length > 0)
      createPostgresFromConfig(config)
    else if (envDatabaseUrl != null)
      createPostgresFromEnv(envDatabaseUrl)
    else
      createEmbedded(config)
  }

  def createEmbedded(connectionString: String): SqlDatabase = {
    val db = Database.forURL(connectionString)
    SqlDatabase(db, slick.driver.H2Driver, JdbcConnectionString(connectionString))
  }

  private def createPostgresFromEnv(envDatabaseUrl: String) = {
    import DatabaseConfig._
    
    val dbUri    = new URI(envDatabaseUrl)
    val username = dbUri.getUserInfo.split(":")(0)
    val password = dbUri.getUserInfo.split(":")(1)
    val intermediaryConfig = new DatabaseConfig {
      override def rootConfig: Config =
        ConfigFactory
          .empty()
          .withValue(PostgresDSClass, fromAnyRef("org.postgresql.ds.PGSimpleDataSource"))
          .withValue(PostgresServerNameKey, fromAnyRef(dbUri.getHost))
          .withValue(PostgresPortKey, fromAnyRef(dbUri.getPort))
          .withValue(PostgresDbNameKey, fromAnyRef(dbUri.getPath.tail))
          .withValue(PostgresUsernameKey, fromAnyRef(username))
          .withValue(PostgresPasswordKey, fromAnyRef(password))
          .withFallback(ConfigFactory.load())
    }
    createPostgresFromConfig(intermediaryConfig)
  }

  private def postgresUrl(host: String, port: String, dbName: String) =
    s"jdbc:postgresql://$host:$port/$dbName"

  private def postgresConnectionString(config: DatabaseConfig) = {
    val host     = config.dbPostgresServerName
    val port     = config.dbPostgresPort
    val dbName   = config.dbPostgresDbName
    val username = config.dbPostgresUsername
    val password = config.dbPostgresPassword
    JdbcConnectionString(postgresUrl(host, port, dbName), username, password)
  }

  private def createPostgresFromConfig(config: DatabaseConfig) = {
    val db = Database.forConfig("db.postgres", config.rootConfig)
    SqlDatabase(db, slick.driver.PostgresDriver, postgresConnectionString(config))
  }

  private def createEmbedded(config: DatabaseConfig): SqlDatabase = {
    val db = Database.forConfig("db.h2")
    SqlDatabase(db, slick.driver.H2Driver, JdbcConnectionString(embeddedConnectionStringFromConfig(config)))
  }

  private def embeddedConnectionStringFromConfig(config: DatabaseConfig): String = {
    val url      = config.dbH2Url
    val fullPath = url.split(":")(3)
    logger.info(s"Using an embedded database, with data files located at: $fullPath")
    url
  }
} 
Example 31
Source File: ConfigWithDefault.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package util

import java.util.concurrent.TimeUnit

import com.typesafe.config.Config

trait ConfigWithDefault {

  def rootConfig: Config

  def getBoolean(path: String, default: Boolean) = ifHasPath(path, default) { _.getBoolean(path) }
  def getString(path: String, default: String)   = ifHasPath(path, default) { _.getString(path) }
  def getInt(path: String, default: Int)         = ifHasPath(path, default) { _.getInt(path) }
  def getConfig(path: String, default: Config)   = ifHasPath(path, default) { _.getConfig(path) }
  def getMilliseconds(path: String, default: Long) = ifHasPath(path, default) {
    _.getDuration(path, TimeUnit.MILLISECONDS)
  }
  def getOptionalString(path: String, default: Option[String] = None) = getOptional(path, default) { _.getString(path) }

  private def ifHasPath[T](path: String, default: T)(get: Config => T): T =
    if (rootConfig.hasPath(path)) get(rootConfig) else default

  private def getOptional[T](fullPath: String, default: Option[T] = None)(get: Config => T) =
    if (rootConfig.hasPath(fullPath)) {
      Some(get(rootConfig))
    } else {
      default
    }

} 
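A consumer of this trait just points rootConfig at a loaded Config and lets the defaults cover any missing paths. A small sketch, assuming the ConfigWithDefault trait above is on the classpath; the key names and defaults are illustrative only.

import com.typesafe.config.{Config, ConfigFactory}

object ServerConfigDemo extends App {
  val serverConfig = new ConfigWithDefault {
    val rootConfig: Config = ConfigFactory.parseString("server.host = 0.0.0.0")
  }

  // Present path: read from the config; missing path: the supplied default wins.
  println(serverConfig.getString("server.host", "localhost")) // 0.0.0.0
  println(serverConfig.getInt("server.port", 9000))           // 9000
}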
Example 32
Source File: DatabaseConfig.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package util

import com.typesafe.config.Config

trait DatabaseConfig extends ConfigWithDefault {
  def rootConfig: Config

  import DatabaseConfig._

  lazy val dbH2Url              = getString(s"db.h2.properties.url", "jdbc:h2:file:./data")
  lazy val dbPostgresServerName = getString(PostgresServerNameKey, "")
  lazy val dbPostgresPort       = getString(PostgresPortKey, "5432")
  lazy val dbPostgresDbName     = getString(PostgresDbNameKey, "")
  lazy val dbPostgresUsername   = getString(PostgresUsernameKey, "")
  lazy val dbPostgresPassword   = getString(PostgresPasswordKey, "")
}

object DatabaseConfig {
  val PostgresDSClass       = "db.postgres.dataSourceClass"
  val PostgresServerNameKey = "db.postgres.properties.serverName"
  val PostgresPortKey       = "db.postgres.properties.portNumber"
  val PostgresDbNameKey     = "db.postgres.properties.databaseName"
  val PostgresUsernameKey   = "db.postgres.properties.user"
  val PostgresPasswordKey   = "db.postgres.properties.password"
} 
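The constants above all point into a db.postgres block (plus a db.h2 block for the embedded case), so the matching HOCON is straightforward. A self-contained sketch with placeholder values; only the paths mirror the keys defined in DatabaseConfig.

import com.typesafe.config.ConfigFactory

object PostgresConfigShapeDemo extends App {
  // Placeholder values; the paths are the ones DatabaseConfig reads.
  val rootConfig = ConfigFactory.parseString(
    """
      |db.postgres {
      |  dataSourceClass = "org.postgresql.ds.PGSimpleDataSource"
      |  properties {
      |    serverName   = "localhost"
      |    portNumber   = "5432"
      |    databaseName = "clippy"
      |    user         = "clippy"
      |    password     = "secret"
      |  }
      |}
      |db.h2.properties.url = "jdbc:h2:file:./data"
    """.stripMargin)

  println(rootConfig.getString("db.postgres.properties.serverName")) // localhost
  println(rootConfig.getString("db.h2.properties.url"))              // jdbc:h2:file:./data
}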
Example 33
Source File: ConfigurationDetectorSpec.scala    From twitter4s   with Apache License 2.0 5 votes vote down vote up
package com.danielasfregola.twitter4s.util

import com.typesafe.config.{Config, ConfigException}
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import org.specs2.specification.Scope

class ConfigurationDetectorSpec extends Specification with Mockito {

  val myConfigFromEnvVar = "my-configuration-from-env-var"
  val myConfigFromFile = "my-configuration-from-file"

  abstract class ConfigurationDetectorSpecContext extends Scope {
    def config = mock[Config]

    val variableName = "MY-CONFIG"
    val configName = "my.config"
  }

  trait NoEnvVariable extends ConfigurationDetector {
    override protected def environmentVariable(name: String) = None
  }

  trait WithEnvVariable extends ConfigurationDetector {
    override protected def environmentVariable(name: String) = Some(myConfigFromEnvVar)
  }

  trait NoConfigFromFile extends ConfigurationDetector {
    override protected def configuration(path: String) = throw new ConfigException.Missing(path)
  }

  trait WithConfigFromFile extends ConfigurationDetector {
    override protected def configuration(path: String) = myConfigFromFile
  }

  "ConfigurationDetector" should {

    "if environment variable exists" in {

      "if configuration from file does not exists" in {
        "detect the configuration from the environment variable" in
          new ConfigurationDetectorSpecContext with WithEnvVariable with NoConfigFromFile {
            envVarOrConfig(variableName, configName) === myConfigFromEnvVar
          }
      }

      "if configuration from file exists" in {
        "detect the configuration from the environment variable" in
          new ConfigurationDetectorSpecContext with WithEnvVariable with WithConfigFromFile {
            envVarOrConfig(variableName, configName) === myConfigFromEnvVar
          }
      }
    }

    "if environment variable does not exist" in {

      "if configuration from file exists" in {
        "detect the configuration from the configuration file" in
          new ConfigurationDetectorSpecContext with NoEnvVariable with WithConfigFromFile {
            envVarOrConfig(variableName, configName) === myConfigFromFile
          }
      }

      "if configuration from file does not exist" in {
        "throw an exception" in
          new ConfigurationDetectorSpecContext with NoEnvVariable with NoConfigFromFile {
            envVarOrConfig(variableName, configName) must throwA[RuntimeException]
          }
      }
    }
  }
} 
Example 34
Source File: FlickrClient.scala    From scalando   with MIT License 5 votes vote down vote up
package com.jcranky.flickr

import com.jcranky.flickr.model.Foto
import com.typesafe.config.Config

class FlickrClient(apiKey: String, baseUrl: String, httpClient: HttpClient, responseParser: ResponseParser) {
  import FlickrClient._

  def buscaFotos(tags: List[String]): Either[ClientError, Seq[Foto]] = {
    val url = s"$baseUrl?method=$searchMethod&api_key=$apiKey&tags=${tags.mkString(",")}"

    val response = httpClient.get(url)

    response.fold(
      (err) => Left(ClientError(err.msg)),
      (resp) => responseParser.parse(resp.body) match {
        case Right(parsed) => Right(parsed)
        case Left(error) => Left(ClientError(error.toString))
      }
    )
  }
}

object FlickrClient {
  case class ClientError(msg: String)

  val searchMethod = "flickr.photos.search"

  def fromConfig(config: Config): FlickrClient = {
    val apiKey = config.getString("flickr.api.key")
    val baseUrl = config.getString("flickr.api.baseurl")

    new FlickrClient(apiKey, baseUrl, HttpClient.fromConfig(), ResponseParser.fromConfig(config))
  }
} 
Example 35
Source File: ResponseParser.scala    From scalando   with MIT License 5 votes vote down vote up
package com.jcranky.flickr

import com.jcranky.flickr.model.Foto
import com.typesafe.config.Config

import scala.xml.{Elem, XML}

sealed trait ResponseParser {
  def parse(xmlStr: String): Either[FlickrError, Seq[Foto]]
}

final class XmlFlickrParser extends ResponseParser {
  import ResponseParser._

  override def parse(xmlStr: String): Either[FlickrError, Seq[Foto]] = {
    val xml = XML.loadString(xmlStr)

    (xml \\ "rsp" \ "@stat").text match {
      case "ok" => Right(processSuccess(xml))
      case `failStat` => Left(processFailure(xml))
      case _ => Left(FlickrUnknownError(unknownFlickrResp))
    }
  }

  def processSuccess(xml: Elem): Seq[Foto] =
    (xml \\ "photo").map { photoXml =>
      Foto(
        (photoXml \ "@id").text,
        (photoXml \ "@owner").text,
        (photoXml \ "@secret").text,
        (photoXml \ "@server").text,
        (photoXml \ "@farm").text.toInt,
        (photoXml \ "@title").text,
        flickrBoolean((photoXml \ "@ispublic").text),
        flickrBoolean((photoXml \ "@isfriend").text),
        flickrBoolean((photoXml \ "@isfamily").text)
      )
    }

  def processFailure(xml: Elem): FlickrError =
    (xml \\ "err").map { errXml =>
      FlickrKnownError(
        (errXml \ "@code").text.toInt,
        (errXml \ "@msg").text
      )
    }.headOption.getOrElse(
      FlickrUnknownError(errNotFound)
    )
}

final class JsonFlickrParser extends ResponseParser {
  
  def parse(xmlStr: String): Either[FlickrError, Seq[Foto]] = ???
}

object ResponseParser {
  val okStat = "ok"
  val failStat = "fail"

  val unknownFlickrResp = "Could not parse Flickr response"
  val errNotFound = "Could not parse Flickr error response"

  def flickrBoolean(rawAttribute: String): Boolean =
    rawAttribute.toInt match {
      case 1 => true
      case _ => false
    }

  def fromConfig(config: Config): ResponseParser = {
    val parser = config.getString("flickr.api.parser")
    parser match {
      case "xml" => new XmlFlickrParser()
      case "json" => new JsonFlickrParser()
      // the config could be wrongly set by the user, so we default here to use the xml parser
      case _ => new XmlFlickrParser()
    }
  }
}

sealed trait FlickrError
final case class FlickrKnownError(code: Int, msg: String) extends FlickrError
final case class FlickrUnknownError(msg: String) extends FlickrError 
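FlickrClient.fromConfig and ResponseParser.fromConfig read from the same flickr.api subtree, so one configuration fragment drives both. A minimal sketch with placeholder values; the key paths are exactly the ones accessed above.

import com.typesafe.config.ConfigFactory

object FlickrConfigDemo extends App {
  val config = ConfigFactory.parseString(
    """
      |flickr.api {
      |  key     = "my-api-key"
      |  baseurl = "https://api.flickr.com/services/rest/"
      |  parser  = "xml"
      |}
    """.stripMargin)

  // Same selection rule as ResponseParser.fromConfig: anything other than "json" falls back to the XML parser.
  val parserKind = config.getString("flickr.api.parser") match {
    case "json" => "JsonFlickrParser"
    case _      => "XmlFlickrParser"
  }

  println(s"key=${config.getString("flickr.api.key")} parser=$parserKind")
}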
Example 36
Source File: ConfigUtils.scala    From sparkplug   with MIT License 5 votes vote down vote up
package springnz.sparkplug.util

import com.typesafe.config.Config

import scala.collection.JavaConversions._

object ConfigUtils {

  def configFields(appConfig: Config): Map[String, String] = {
    appConfig.entrySet
      .map {
        case entry ⇒
          val key = entry.getKey
          (key, appConfig.getString(key))
      }
      .toMap
  }

} 
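configFields walks every leaf entry and renders it as a string keyed by its full path. The following self-contained sketch applies the same idea to a throwaway config; the keys and values are arbitrary.

import com.typesafe.config.ConfigFactory

import scala.collection.JavaConversions._

object ConfigFieldsDemo extends App {
  val appConfig = ConfigFactory.parseString(
    """
      |spark.master   = "local[2]"
      |spark.app.name = "demo"
    """.stripMargin)

  // Same approach as ConfigUtils.configFields: flatten entrySet and read each leaf via getString.
  val fields: Map[String, String] =
    appConfig.entrySet.map(entry => entry.getKey -> appConfig.getString(entry.getKey)).toMap

  fields.foreach { case (key, value) => println(s"$key = $value") }
}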
Example 37
Source File: CustomRouteTestKit.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.testkit

import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.TestKitBase
import com.typesafe.config.Config
import org.scalatest.Suite
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.{Unicomplex, UnicomplexBoot}

abstract class CustomRouteTestKit(val boot: UnicomplexBoot) extends {
  implicit override val system = boot.actorSystem
} with TestKitBase with Suite with ScalatestRouteTest with DebugTiming with PortGetter {

  def this() {
    this(CustomTestKit.boot())
  }

  def this(actorSystemName: String) {
    this(CustomTestKit.boot(Option(actorSystemName)))
  }

  def this(config: Config) {
    this(CustomTestKit.boot(config = Option(config)))
  }

  def this(resources: Seq[String], withClassPath: Boolean) {
    this(CustomTestKit.boot(resources = Option(resources), withClassPath = Option(withClassPath)))
  }

  def this(actorSystemName: String, resources: Seq[String], withClassPath: Boolean) {
    this(CustomTestKit.boot(Option(actorSystemName), resources = Option(resources), withClassPath = Option(withClassPath)))
  }

  def this(config: Config, resources: Seq[String], withClassPath: Boolean) {
    this(CustomTestKit.boot(config = Option(config), resources = Option(resources), withClassPath = Option(withClassPath)))
  }

  override protected def beforeAll(): Unit = {
    CustomTestKit.checkInit(system)
  }

  override protected def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }
} 
Example 38
Source File: UnicomplexActorPublisherJSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.duration._

object UnicomplexActorPublisherJSpec {
  val myConfig: Config = ConfigFactory.parseString(
    """
      | squbs.actorsystem-name = UnicomplexActorPublisherJSpec
    """.stripMargin)
  val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
    .scanResources("/")
    .initExtensions
    .start()
}

final class UnicomplexActorPublisherJSpec extends TestKit(UnicomplexActorPublisherJSpec.boot.actorSystem)
    with FlatSpecLike with Matchers with BeforeAndAfterAll {
  implicit val materializer = ActorMaterializer()
  val duration = 10.second

  val in = TestSource.probe[String]

  // expose probe port(s)
  val mat = new UnicomplexActorPublisherJ(system).runnableGraph()
  val (pub, sub) = mat.toScala
  val (pubIn, pubTrigger) = pub.toScala

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UnicomplexTriggerJ" should "activate flow by unicomplex" in {
    // send 2 elements to in
    pubIn.sendNext("1")
    pubIn.sendNext("2")
    sub.request(2)
    sub.expectNext(duration, "1")
    sub.expectNext("2")

    // re-send Active to unicomplex trigger, flow continues
    sub.request(2)
    sub.expectNoMessage(remainingOrDefault)
    pubTrigger ! SystemState
    pubIn.sendNext("3")
    pubIn.sendNext("4")
    sub.expectNext("3", "4")
  }
} 
Example 39
Source File: UnicomplexActorPublisherSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.duration._

object UnicomplexActorPublisherSpec {
  val myConfig: Config = ConfigFactory.parseString(
    """
      | squbs.actorsystem-name = UnicomplexActorPublisherSpec
    """.stripMargin)
  val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
    .scanResources("/")
    .initExtensions
    .start()
}

final class UnicomplexActorPublisherSpec extends TestKit(UnicomplexActorPublisherSpec.boot.actorSystem)
    with FlatSpecLike with Matchers with BeforeAndAfterAll {

  implicit val materializer = ActorMaterializer()
  val duration = 10.second

  val in = TestSource.probe[String]

  // expose probe port(s)
  val ((pubIn, pubTrigger), sub) = LifecycleManaged().source(in).toMat(TestSink.probe[String](system))(Keep.both).run()

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UnicomplexTrigger" should "activate flow by unicomplex" in {
    // send 2 elements to in
    pubIn.sendNext("1")
    pubIn.sendNext("2")
    sub.request(2)
    sub.expectNext(duration, "1")
    sub.expectNext("2")

    // re-send Active to unicomplex trigger, flow continues
    sub.request(2)
    sub.expectNoMessage(remainingOrDefault)
    pubTrigger ! SystemState
    pubIn.sendNext("3")
    pubIn.sendNext("4")
    sub.expectNext("3", "4")
  }
} 
Example 40
Source File: ActorRegistryInit.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.actorregistry


import akka.actor._
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import org.squbs.lifecycle.ExtensionLifecycle
import org.squbs.util.ConfigUtil
import org.squbs.unicomplex.UnicomplexBoot.StartupType

import scala.concurrent.duration._


private class ActorRegistryInit extends ExtensionLifecycle with LazyLogging {

  override def postInit(): Unit = {
    logger.info(s"postInit ${this.getClass}")

    import ConfigUtil._
    import boot._

    val registryConfig = config.getConfig("squbs-actorregistry")

    val cubeActorList = cubes.filterNot(x=> x.info.name == "ActorRegistryCube" || x.info.name == "RemoteCube").flatMap {
      cube =>
        cube.components.getOrElse(StartupType.ACTORS, Seq.empty).map {
          config =>
            val className = config getString "class-name"
            val actorName = config.get[String]("name", className.substring(className.lastIndexOf('.') + 1))
            val messageTypeList = config.get[Seq[Config]]("message-class", Seq.empty[Config]).map { x =>
              CubeActorMessageType(x.getOption[String]("request"), x.getOption[String]("response"))
            }

            val path = s"/user/${cube.info.name}/$actorName"

            CubeActorInfo(path, messageTypeList)
        }
    }.toList

    val t = registryConfig.getInt("timeout")
    implicit val system = boot.actorSystem
    system.actorOf(Props(classOf[HelperActor],
              system.actorSelection(ActorRegistry.path),
              StartActorRegister(cubeActorList, t),
              FiniteDuration(t, MILLISECONDS)))
  }
}

private class HelperActor(sel: ActorSelection, msg: Any, duration: FiniteDuration) extends Actor {
  import context.dispatcher
  sel ! Identify("try")

  context.setReceiveTimeout(duration)
  def receive = {
    case ActorIdentity("try", Some(actor)) =>
      actor ! msg
      context.stop(self)
    case ReceiveTimeout =>
      context.setReceiveTimeout(Duration.Undefined)
      context.system.scheduler.scheduleOnce(duration) {
        sel ! Identify("try")
      }
  }
} 
Example 41
Source File: HogConfig.scala    From hogzilla   with GNU General Public License v2.0 5 votes vote down vote up
package org.hogzilla.util

import java.security.MessageDigest
import org.apache.hadoop.hbase.util.Bytes
import javax.xml.bind.DatatypeConverter
import math._
import com.typesafe.config.Config
import scala.collection.mutable.HashSet



object HogConfig {
  
   
  
  def get(config: Config, key: String, valueType: String, default: Any): Any = {
    if (config == null)
      return default

    try {
      val value = config.getString(key)

      if (value.isEmpty())
        return default // Return default value

      println(f"Configuration: $key => $value")

      if (valueType.equals("Int"))
        value.toInt
      else if (valueType.equals("Double"))
        value.toDouble
      else if (valueType.equals("Long"))
        value.toLong
      else if (valueType.equals("Set(Int)")) {
        val patternSet    = "Set\\(".r
        val patternSetEnd = "\\)".r

        if (value.equals("Set()"))
          return Set()

        return (patternSetEnd replaceAllIn ((patternSet replaceAllIn (value, "")), ""))
          .split(",").map({ x => x.toInt }).toSet
      } else if (valueType.equals("Set(String)")) {
        val patternSet    = "Set\\(".r
        val patternSetEnd = "\\)".r

        if (value.equals("Set()"))
          return Set()

        return (patternSetEnd replaceAllIn ((patternSet replaceAllIn (value, "")), ""))
          .split(",").map({ x => println(x.toString.trim()); x.toString.trim() }).toSet
      } else
        default // Create type first

    } catch {
      case t: Throwable =>
        t.printStackTrace()
        println(f"Problem parsing $key . Check if it is ok. Using default value")
        default
    }
  }
  
  def getInt(config:Config,key:String,default:Any):Int =
  {
    get(config,key,"Int",default).asInstanceOf[Int]
  }
  
  def getLong(config:Config,key:String,default:Any):Long =
  {
    get(config,key,"Long",default).asInstanceOf[Long]
  }
  
  def getDouble(config:Config,key:String,default:Any):Double =
  {
    get(config,key,"Double",default).asInstanceOf[Long]
  }
  
  def getSetInt(config:Config,key:String,default:Any):Set[Int] =
  {
    get(config,key,"Set(Int)",default).asInstanceOf[Set[Int]]
  }
  
  def getSetString(config:Config,key:String,default:Any):Set[String] =
  {
    get(config,key,"Set(String)",default).asInstanceOf[Set[String]]
  }
   

} 
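Set-typed values are written literally as Set(...) inside the configuration string and parsed by the regular expressions above. A short sketch, assuming the HogConfig object is on the classpath; the keys and values are made up for illustration.

import com.typesafe.config.ConfigFactory

object HogConfigDemo extends App {
  val config = ConfigFactory.parseString(
    """
      |hogzilla.ports     = "Set(21,22,80)"
      |hogzilla.threshold = "0.75"
    """.stripMargin)

  val ports: Set[Int]   = HogConfig.getSetInt(config, "hogzilla.ports", Set[Int]())
  val threshold: Double = HogConfig.getDouble(config, "hogzilla.threshold", 0.5d)

  println(s"ports=$ports threshold=$threshold") // ports=Set(21, 22, 80) threshold=0.75
}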
Example 42
Source File: RunnerConfigUtils.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.blueprint

import com.typesafe.config.{ Config, ConfigFactory }

import scala.io.Source
import scala.util.{ Failure, Success, Try }

object RunnerConfigUtils {
  val StorageMountPathKey = "storage.mountPath"
  val MetadataName        = "metadata.name"
  val MetadataNamespace   = "metadata.namespace"
  val MetadataUid         = "metadata.uid"

  def addStorageConfig(config: Config, pvcVolumeMountPath: String): Config = {
    val storageConfig = ConfigFactory.parseString(s"""$StorageMountPathKey:"$pvcVolumeMountPath"""")
    config.withFallback(storageConfig)
  }

  def addPodRuntimeConfig(config: Config, downwardApiVolumeMountPath: String): Config = {
    val (name, namespace, uid) = getPodMetadata(downwardApiVolumeMountPath)
    val podRuntimeConfig       = ConfigFactory.parseString(s"""
                                                              |cloudflow.runner.pod: {
                                                              |  $MetadataName:"$name"
                                                              |  $MetadataNamespace:"$namespace"
                                                              |  $MetadataUid:"$uid"
                                                              |}
                                                              |""".stripMargin)
    config.withFallback(podRuntimeConfig)
  }

  def getPodMetadata(downwardApiVolumeMountPath: String): (String, String, String) = {
    val name      = readDownwardApi(downwardApiVolumeMountPath, MetadataName)
    val namespace = readDownwardApi(downwardApiVolumeMountPath, MetadataNamespace)
    val uid       = readDownwardApi(downwardApiVolumeMountPath, MetadataUid)
    (name, namespace, uid)
  }

  private def readDownwardApi(downwardApiVolumeMountPath: String, filename: String): String = {
    val path = s"$downwardApiVolumeMountPath/$filename"
    Try(Source.fromFile(path).getLines.mkString) match {
      case Success(contents) ⇒ contents
      case Failure(ex) ⇒
        throw new Exception(s"An error occurred while attempting to access the downward API volume mount with path '$path'", ex)
    }
  }
} 
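addStorageConfig only layers one extra entry underneath the existing configuration, so its effect is easy to see in isolation. A sketch, assuming the RunnerConfigUtils object above is on the classpath; the base config content and the mount path are placeholders.

import com.typesafe.config.ConfigFactory

object StorageConfigDemo extends App {
  val base     = ConfigFactory.parseString("some.app.setting = unchanged")
  val enriched = RunnerConfigUtils.addStorageConfig(base, "/mnt/spark/storage")

  // The storage entry is added as a fallback, so existing keys in the base config always win.
  println(enriched.getString("storage.mountPath")) // /mnt/spark/storage
  println(enriched.getString("some.app.setting"))  // unchanged
}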
Example 43
Source File: SparkStreamletContextImpl.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.spark.kafka

import java.io.File

import com.typesafe.config.Config
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.encoders.{ ExpressionEncoder, RowEncoder }
import org.apache.spark.sql.streaming.{ OutputMode, StreamingQuery }
import cloudflow.spark.SparkStreamletContext
import cloudflow.spark.avro.{ SparkAvroDecoder, SparkAvroEncoder }
import cloudflow.spark.sql.SQLImplicits._
import cloudflow.streamlets._

import scala.reflect.runtime.universe._

class SparkStreamletContextImpl(
    private[cloudflow] override val streamletDefinition: StreamletDefinition,
    session: SparkSession,
    override val config: Config
) extends SparkStreamletContext(streamletDefinition, session) {

  val storageDir           = config.getString("storage.mountPath")
  val maxOffsetsPerTrigger = config.getLong("cloudflow.spark.read.options.max-offsets-per-trigger")
  def readStream[In](inPort: CodecInlet[In])(implicit encoder: Encoder[In], typeTag: TypeTag[In]): Dataset[In] = {

    implicit val inRowEncoder: ExpressionEncoder[Row] = RowEncoder(encoder.schema)
    val schema                                        = inPort.schemaAsString
    val topic                                         = findTopicForPort(inPort)
    val srcTopic                                      = topic.name
    val brokers                                       = topic.bootstrapServers.getOrElse(internalKafkaBootstrapServers)
    val src: DataFrame = session.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", brokers)
      .options(kafkaConsumerMap(topic))
      .option("maxOffsetsPerTrigger", maxOffsetsPerTrigger)
      .option("subscribe", srcTopic)
      // Allow restart of stateful streamlets that may have been offline for longer than the kafka retention period.
      // This setting may result in data loss in some cases but allows for continuity of the runtime
      .option("failOnDataLoss", false)
      .option("startingOffsets", "earliest")
      .load()

    val rawDataset = src.select($"value").as[Array[Byte]]

    val dataframe: Dataset[Row] = rawDataset.mapPartitions { iter ⇒
      val avroDecoder = new SparkAvroDecoder[In](schema)
      iter.map(avroDecoder.decode)
    }(inRowEncoder)

    dataframe.as[In]
  }

  def kafkaConsumerMap(topic: Topic) = topic.kafkaConsumerProperties.map {
    case (key, value) => s"kafka.$key" -> value
  }
  def kafkaProducerMap(topic: Topic) = topic.kafkaProducerProperties.map {
    case (key, value) => s"kafka.$key" -> value
  }

  def writeStream[Out](stream: Dataset[Out], outPort: CodecOutlet[Out], outputMode: OutputMode)(implicit encoder: Encoder[Out],
                                                                                                typeTag: TypeTag[Out]): StreamingQuery = {

    val avroEncoder   = new SparkAvroEncoder[Out](outPort.schemaAsString)
    val encodedStream = avroEncoder.encodeWithKey(stream, outPort.partitioner)

    val topic     = findTopicForPort(outPort)
    val destTopic = topic.name
    val brokers   = topic.bootstrapServers.getOrElse(internalKafkaBootstrapServers)

    // metadata checkpoint directory on mount
    val checkpointLocation = checkpointDir(outPort.name)
    val queryName          = s"$streamletRef.$outPort"

    encodedStream.writeStream
      .outputMode(outputMode)
      .format("kafka")
      .queryName(queryName)
      .option("kafka.bootstrap.servers", brokers)
      .options(kafkaProducerMap(topic))
      .option("topic", destTopic)
      .option("checkpointLocation", checkpointLocation)
      .start()
  }

  def checkpointDir(dirName: String): String = {
    val baseCheckpointDir = new File(storageDir, streamletRef)
    val dir               = new File(baseCheckpointDir, dirName)
    if (!dir.exists()) {
      val created = dir.mkdirs()
      require(created, s"Could not create checkpoint directory: $dir")
    }
    dir.getAbsolutePath
  }
} 
Example 44
Source File: StreamletLoader.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.streamlets

import scala.util.{ Failure, Try }
import scala.util.control.NoStackTrace

import com.typesafe.config.Config

case class LoadedStreamlet(streamlet: Streamlet[StreamletContext], config: StreamletDefinition)


trait StreamletLoader {

  def loadStreamlet(config: Config): Try[LoadedStreamlet] =
    for {
      streamletConfig ← StreamletDefinition.read(config)
      loadedStreamlet ← loadStreamlet(streamletConfig)
    } yield loadedStreamlet

  def loadStreamletClass(streamletClassName: String): Try[Streamlet[StreamletContext]] =
    for {
      instance ← ClassOps.instanceOf(streamletClassName).recoverWith {
        case _: ClassNotFoundException ⇒ Failure(new StreamletClassNotFound(streamletClassName))
        case _: InstantiationException ⇒ Failure(new NoArgsConstructorExpectedException(streamletClassName))
      }
      streamlet ← Try(instance.asInstanceOf[Streamlet[StreamletContext]]).recoverWith {
        case ex: ClassCastException ⇒ Failure(new InvalidStreamletClass(streamletClassName, ex))
      }
    } yield streamlet

  def loadStreamlet(streamletConfig: StreamletDefinition): Try[LoadedStreamlet] =
    loadStreamletClass(streamletConfig.streamletClass).map { streamlet ⇒
      LoadedStreamlet(streamlet, streamletConfig)
    }

  case class StreamletClassNotFound(className: String)
      extends Exception(s"The configured Streamlet class $className not found")
      with NoStackTrace

  case class InvalidStreamletClass(className: String, cause: Exception)
      extends Exception(s"The configured Streamlet class $className is invalid")
      with NoStackTrace

  case class NoArgsConstructorExpectedException(className: String)
      extends Exception(s"The configured Streamlet class $className must have an arg-less constructor")
      with NoStackTrace

} 
Example 45
Source File: TestStreamlets.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.streamlets.descriptors

import scala.collection.immutable

import org.apache.avro.SchemaBuilder

import com.typesafe.config.Config

import cloudflow.streamlets._
import cloudflow.streamlets.avro.AvroUtil

case class Coffee(espressos: Int)

object Schemas {
  val coffeeSchema = SchemaBuilder
    .record("Coffee")
    .namespace("cloudflow.sbt")
    .fields()
    .name("expressos")
    .`type`()
    .nullable()
    .intType()
    .noDefault()
    .endRecord()
}

case object TestRuntime extends StreamletRuntime {
  override val name = "test-runtime"
}

trait TestStreamlet extends Streamlet[StreamletContext] {
  override def runtime: StreamletRuntime                                 = TestRuntime
  def logStartRunnerMessage(buildInfo: String): Unit                     = ???
  override protected def createContext(config: Config): StreamletContext = ???
  override def run(context: StreamletContext): StreamletExecution        = ???

}

class CoffeeIngress extends Streamlet[StreamletContext] with TestStreamlet {
  case class TestOutlet(name: String, schemaDefinition: SchemaDefinition) extends Outlet
  override val shape                                = StreamletShape(TestOutlet("out", AvroUtil.createSchemaDefinition(Schemas.coffeeSchema)))
  override val labels: immutable.IndexedSeq[String] = Vector("test", "coffee")
  override val description: String                  = "Coffee Ingress Test"
} 
Example 46
Source File: Dispatchers.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core

import com.typesafe.config.Config
import scala.collection.JavaConverters._
import swave.core.impl.util.SettingsCompanion

abstract class Dispatchers private[core] {

  def settings: Dispatchers.Settings

  def defaultDispatcher: Dispatcher

  def apply(id: String): Dispatcher
}

object Dispatchers {

  final case class Settings(dispatcherDefs: Map[String, Dispatcher.Settings])

  object Settings extends SettingsCompanion[Settings]("swave.core.dispatcher") {
    def fromSubConfig(c: Config): Settings = {
      val defConf    = c getConfig "default-config"
      val definition = c getConfig "definition"
      Settings {
        definition.root().keySet().iterator().asScala.foldLeft(Map.empty[String, Dispatcher.Settings]) { (map, name) ⇒
          map.updated(name, Dispatcher.Settings(name, definition getConfig name, defConf))
        }
      }
    }
  }
} 
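fromSubConfig is handed the swave.core.dispatcher subtree and expects a default-config block plus a definition block whose child keys are the dispatcher names. A self-contained sketch of that shape; the dispatcher names and nested settings are illustrative, not swave defaults.

import com.typesafe.config.ConfigFactory
import scala.collection.JavaConverters._

object DispatcherConfigShapeDemo extends App {
  val c = ConfigFactory.parseString(
    """
      |default-config { parallelism = 4 }
      |definition {
      |  default  { }
      |  blocking { parallelism = 16 }
      |}
    """.stripMargin)

  // The dispatcher names are simply the keys of the definition block, as in Settings.fromSubConfig above.
  val names = c.getConfig("definition").root().keySet().iterator().asScala.toList.sorted
  println(names) // List(blocking, default)
}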
Example 47
Source File: Scheduler.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, FiniteDuration}
import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion
import swave.core.macros._
import swave.core.util._

trait Scheduler {

  def settings: Scheduler.Settings

  final def schedule(interval: FiniteDuration)(body: ⇒ Unit)(implicit ec: ExecutionContext): Cancellable =
    schedule(Duration.Zero, interval)(body)

  final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(body: ⇒ Unit)(
      implicit ec: ExecutionContext): Cancellable =
    schedule(initialDelay, interval, Runnable(body))

  final def schedule(interval: FiniteDuration, r: Runnable)(implicit ec: ExecutionContext): Cancellable =
    schedule(Duration.Zero, interval, r)

  def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, r: Runnable)(
      implicit ec: ExecutionContext): Cancellable

  final def scheduleOnce(delay: FiniteDuration)(body: ⇒ Unit)(implicit ec: ExecutionContext): Cancellable =
    scheduleOnce(delay, Runnable(body))

  def scheduleOnce(delay: FiniteDuration, r: Runnable)(implicit ec: ExecutionContext): Cancellable
}

object Scheduler {

  final case class Settings(tickDuration: FiniteDuration, ticksPerWheel: Int) {
    requireArg(tickDuration > Duration.Zero, "`tickDuration` must be > 0")
    requireArg(ticksPerWheel > 0, "`ticksPerWheel` must be > 0")
    requireArg(isPowerOf2(ticksPerWheel), "`ticksPerWheel` must be a power of 2")

    def withTickDuration(tickDuration: FiniteDuration) = copy(tickDuration = tickDuration)
    def withTicksPerWheel(ticksPerWheel: Int)          = copy(ticksPerWheel = ticksPerWheel)
  }

  object Settings extends SettingsCompanion[Settings]("swave.core.scheduler") {
    def fromSubConfig(c: Config): Settings =
      Settings(tickDuration = c getFiniteDuration "tick-duration", ticksPerWheel = c getInt "ticks-per-wheel")
  }
} 
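The scheduler settings boil down to two keys under swave.core.scheduler. A self-contained sketch with illustrative values, using the plain Typesafe Config API instead of the getFiniteDuration helper from the RichConfig example below:

import java.util.concurrent.TimeUnit

import com.typesafe.config.ConfigFactory

object SchedulerConfigShapeDemo extends App {
  val root = ConfigFactory.parseString(
    """
      |swave.core.scheduler {
      |  tick-duration   = 100ms
      |  ticks-per-wheel = 512
      |}
    """.stripMargin)

  val c = root.getConfig("swave.core.scheduler")
  println(c.getDuration("tick-duration", TimeUnit.MILLISECONDS)) // 100
  println(c.getInt("ticks-per-wheel"))                           // 512
}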
Example 48
Source File: RichConfig.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.util

import com.typesafe.config.{Config, ConfigException}
import swave.core.ConfigurationException
import scala.concurrent.duration._

final class RichConfig(val underlying: Config) extends AnyVal {

  def getScalaDuration(path: String): Duration =
    underlying.getString(path) match {
      case "infinite" ⇒ Duration.Inf
      case x          ⇒ Duration(x)
    }

  def getFiniteDuration(path: String): FiniteDuration =
    Duration(underlying getString path) match {
      case x: FiniteDuration ⇒ x
      case _                 ⇒ throw new ConfigurationException(s"Config setting '$path' must be a finite duration")
    }

  def getPossiblyInfiniteInt(path: String): Int =
    underlying.getString(path) match {
      case "infinite" ⇒ Int.MaxValue
      case x          ⇒ underlying getInt path
    }

  def getIntBytes(path: String): Int = {
    val value: Long = underlying getBytes path
    if (value <= Int.MaxValue) value.toInt
    else throw new ConfigurationException(s"Config setting '$path' must not be larger than ${Int.MaxValue}")
  }

  def getPossiblyInfiniteIntBytes(path: String): Int =
    underlying.getString(path) match {
      case "infinite" ⇒ Int.MaxValue
      case x          ⇒ getIntBytes(path)
    }

  def getPossiblyInfiniteLongBytes(path: String): Long =
    underlying.getString(path) match {
      case "infinite" ⇒ Long.MaxValue
      case x          ⇒ underlying getBytes path
    }

  def getOptionalConfig(path: String): Option[Config] =
    try Some(underlying getConfig path)
    catch {
      case _: ConfigException.Missing ⇒ None
    }
} 
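Because RichConfig is a plain value class it can be exercised directly, without going through the implicit conversion from the package object in the next example. A sketch with made-up settings, assuming the class above is on the classpath:

import com.typesafe.config.ConfigFactory

object RichConfigDemo extends App {
  val config = ConfigFactory.parseString(
    """
      |idle-timeout   = infinite
      |request-buffer = 64
      |max-payload    = 16k
    """.stripMargin)

  val rich = new RichConfig(config)
  println(rich.getScalaDuration("idle-timeout"))         // Duration.Inf
  println(rich.getPossiblyInfiniteInt("request-buffer")) // 64
  println(rich.getIntBytes("max-payload"))               // 16384
}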
Example 49
Source File: package.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core

import java.nio.charset.Charset
import com.typesafe.config.Config
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.collection.mutable
import shapeless.HList

package object util {

  private[this] val _identityFunc = (x: Any) ⇒ x
  def identityFunc[T]: T ⇒ T      = _identityFunc.asInstanceOf[T ⇒ T]

  def identityHash(obj: AnyRef): String = Integer.toHexString(System.identityHashCode(obj))

  val dropFunc: Any ⇒ Unit         = _ ⇒ ()
  val dropFunc2: (Any, Any) ⇒ Unit = (_, _) ⇒ ()

  val oneIntFunc: Any ⇒ Int = _ ⇒ 1

  val UTF8: Charset  = Charset.forName("UTF-8")
  val ASCII: Charset = Charset.forName("US-ASCII")

  def isPowerOf2(i: Int): Boolean = Integer.lowestOneBit(i) == i

  def roundUpToPowerOf2(i: Int): Int = 1 << (32 - Integer.numberOfLeadingZeros(i - 1))

  def Runnable(body: ⇒ Unit): Runnable = new Runnable { def run(): Unit = body }

  implicit def richByteArray(array: Array[Byte]): RichByteArray                    = new RichByteArray(array)
  implicit def richConfig[T](config: Config): RichConfig                           = new RichConfig(config)
  implicit def richDuration(duration: Duration): RichDuration                      = new RichDuration(duration)
  implicit def richFiniteDuration(duration: FiniteDuration): RichFiniteDuration    = new RichFiniteDuration(duration)
  implicit def richFuture[T](future: Future[T]): RichFuture[T]                     = new RichFuture(future)
  implicit def richHList[L <: HList](list: L): RichHList[L]                        = new RichHList(list)
  implicit def richInt(int: Int): RichInt                                          = new RichInt(int)
  implicit def richList[T](list: List[T]): RichList[T]                             = new RichList(list)
  implicit def richLong(long: Long): RichLong                                      = new RichLong(long)
  implicit def richArrayBuffer[T](seq: mutable.ArrayBuffer[T]): RichArrayBuffer[T] = new RichArrayBuffer(seq)
  implicit def richRefArray[T <: AnyRef](array: Array[T]): RichRefArray[T]         = new RichRefArray(array)
  implicit def richSeq[T](seq: Seq[T]): RichSeq[T]                                 = new RichSeq(seq)
  implicit def richString(string: String): RichString                              = new RichString(string)
  implicit def richTraversable[T](seq: Traversable[T]): RichTraversable[T]         = new RichTraversable(seq)
} 
Example 50
Source File: StreamEnvImpl.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.impl

import java.util.concurrent.{ConcurrentHashMap, TimeoutException}
import scala.annotation.tailrec
import scala.util.Try
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import swave.core.macros._
import swave.core._

private[core] final class StreamEnvImpl(val name: String,
                                        val config: Config,
                                        val settings: StreamEnv.Settings,
                                        val classLoader: ClassLoader)
    extends StreamEnv {

  val startTime = System.currentTimeMillis()

  val log = Logger(LoggerFactory.getLogger(name))

  val dispatchers = DispatchersImpl(settings.dispatcherSettings)

  val scheduler = SchedulerImpl(settings.schedulerSettings)

  if (settings.logConfigOnStart) log.info(settings.toString) // TODO: improve rendering

  def defaultDispatcher = dispatchers.defaultDispatcher

  def shutdown(): StreamEnv.Termination =
    new StreamEnv.Termination {
      val schedulerTermination   = scheduler.shutdown()
      val dispatchersTermination = dispatchers.shutdownAll()

      def isTerminated: Boolean = schedulerTermination.isCompleted && unterminatedDispatchers.isEmpty

      def unterminatedDispatchers: List[String] = dispatchersTermination()

      def awaitTermination(timeout: FiniteDuration): Unit = {
        requireArg(timeout >= Duration.Zero, "`timeout` must be >= 0")
        var deadline = System.nanoTime() + timeout.toNanos
        if (deadline < 0) deadline = Long.MaxValue // overflow protection

        @tailrec def await(): Unit =
          if (!isTerminated) {
            if (System.nanoTime() < deadline) {
              Thread.sleep(1L)
              await()
            } else {
              val unterminated =
                if (schedulerTermination.isCompleted) unterminatedDispatchers
                else "scheduler" :: unterminatedDispatchers
              throw new TimeoutException(
                s"StreamEnv did not shut down within specified timeout of $timeout.\n" +
                  s"Unterminated dispatchers: [${unterminated.mkString(", ")}]")
            }
          }

        await()
      }
    }

  private[this] val _extensions = new ConcurrentHashMap[ExtensionId[_], Future[_ <: Extension]]

  @tailrec def getOrLoadExtension[T <: Extension](id: ExtensionId[T]): Future[T] =
    _extensions.get(id) match {
      case null ⇒
        val promise = Promise[T]()
        _extensions.putIfAbsent(id, promise.future) match {
          case null ⇒
            val tryValue = Try(id.createExtension(this))
            promise.complete(tryValue)
            val future = Promise.fromTry(tryValue).future
            _extensions.put(id, future) // speed up future accesses somewhat
            future
          case _ ⇒ getOrLoadExtension(id)
        }
      case x ⇒ x.asInstanceOf[Future[T]]
    }
} 
Example 51
Source File: FileIO.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.io.files

import java.io.File
import java.nio.channels.FileChannel
import java.nio.file.{FileSystems, Files, Path, StandardOpenOption}

import scala.util.control.NonFatal
import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion
import swave.core.io.Bytes
import swave.core.macros._

object FileIO extends SpoutFromFiles with DrainToFiles {

  lazy val userHomePath: Path = FileSystems.getDefault.getPath(System getProperty "user.home")

  def resolveFileSystemPath(pathName: String): Path =
    if (pathName.length >= 2 && pathName.charAt(0) == '~' && pathName.charAt(1) == File.separatorChar) {
      userHomePath.resolve(pathName substring 2)
    } else FileSystems.getDefault.getPath(pathName)

  val WriteCreateOptions: Set[StandardOpenOption] = {
    import StandardOpenOption._
    Set(CREATE, TRUNCATE_EXISTING, WRITE)
  }

  final case class Settings(defaultFileReadingChunkSize: Int, defaultFileWritingChunkSize: Int) {
    requireArg(defaultFileReadingChunkSize > 0, "`defaultFileReadingChunkSize` must be > 0")
    requireArg(defaultFileWritingChunkSize >= 0, "`defaultFileWritingChunkSize` must be >= 0")

    def withDefaultFileReadingChunkSize(defaultFileReadingChunkSize: Int) =
      copy(defaultFileReadingChunkSize = defaultFileReadingChunkSize)
    def withDefaultFileWritingChunkSize(defaultFileWritingChunkSize: Int) =
      copy(defaultFileWritingChunkSize = defaultFileWritingChunkSize)
  }

  object Settings extends SettingsCompanion[Settings]("swave.core.file-io") {
    def fromSubConfig(c: Config): Settings =
      Settings(
        defaultFileReadingChunkSize = c getInt "default-file-reading-chunk-size",
        defaultFileWritingChunkSize = c getInt "default-file-writing-chunk-size")
  }

  def writeFile[T: Bytes](fileName: String, data: T): Unit = writeFile(resolveFileSystemPath(fileName), data)
  def writeFile[T: Bytes](file: File, data: T): Unit       = writeFile(file.toPath, data)
  def writeFile[T: Bytes](path: Path, data: T, options: StandardOpenOption*): Unit = {
    implicit def decorator(value: T): Bytes.Decorator[T] = Bytes.decorator(value)
    Files.write(path, data.toArray, options: _*)
    ()
  }

  def readFile[T: Bytes](fileName: String): T = readFile(resolveFileSystemPath(fileName))
  def readFile[T: Bytes](file: File): T       = readFile(file.toPath)
  def readFile[T: Bytes](path: Path): T       = implicitly[Bytes[T]].apply(Files.readAllBytes(path))

  private[io] def quietClose(channel: FileChannel): Unit =
    try channel.close()
    catch { case NonFatal(_) ⇒ }
} 
Example 52
Source File: Timing.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.testkit

import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion

import scala.collection._
import scala.concurrent.duration._
import scala.collection.JavaConverters._
import swave.core.macros._
import swave.core.util._

object Timing {

  final case class Settings(factor: Double, scalingChain: immutable.Seq[Double], singleExpectDefault: FiniteDuration) {

    requireArg(factor > 0.0 && !factor.isInfinite, "`factor` must be finite and > 0")
    requireArg(
      scalingChain.nonEmpty && scalingChain.size <= 16,
      "`scalingChain` must be non-empty and have at most 16 elements")
    requireArg(singleExpectDefault > Duration.Zero, "`singleExpectDefault` must be > 0")
  }

  object Settings extends SettingsCompanion[Settings]("swave.test.timing") {
    def fromSubConfig(c: Config): Settings =
      Settings(
        factor = c getDouble "factor",
        scalingChain = c.getDoubleList("scaling-chain").asScala.map(_.doubleValue)(breakOut),
        singleExpectDefault = c getFiniteDuration "single-expect-default"
      )
  }
} 
Example 53
Source File: Testkit.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.testkit

import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion
import swave.core.macros._

object Testkit {

  final case class Settings(timingDefaults: Timing.Settings)

  object Settings extends SettingsCompanion[Settings]("swave.test") {
    def fromSubConfig(c: Config): Settings =
      Settings(timingDefaults = Timing.Settings fromSubConfig c.getConfig("timing"))
  }

  sealed abstract class Signal

  object Signal {
    final case class Request(n: Long)      extends Signal { requireArg(n > 0, s"`n` must be > 0") }
    case object Cancel                     extends Signal
    final case class OnNext(value: Any)    extends Signal
    case object OnComplete                 extends Signal
    final case class OnError(e: Throwable) extends Signal
  }
} 
Example 54
Source File: PluginRegistry.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package plugins

import com.typesafe.config.{Config, ConfigFactory}
import java.io.File
import play.api.Logger
import scala.concurrent.{ExecutionContext, Future}
import scala.io.Source

object PluginRegistry {
  
  private val PLUGIN_DIR = new File("plugins") 

  Logger.info("Loading plugin configurations:")
  
  private val configs: Seq[(Config, File)] = 
    findFilesRecursive("plugin.conf", PLUGIN_DIR)
      .map { file => 
        val config = ConfigFactory.parseFile(file) 
        val dir = file.getParentFile
        (config, dir)
      }
  
  configs.foreach { case (c, dir) =>
    Logger.info(s"  ${c.getString("extends")}.${c.getString("id")}")
  }
  
  Logger.info(s"${configs.size} configurations found")
  
  
  private def findFilesRecursive(name: String, dir: File): Seq[File] = {
    if (dir.exists) {
      val all = dir.listFiles

      val dirs = all.filter(_.isDirectory)
      val files = all.filter(_.isFile)

      val matchingFiles = files.filter(_.getName == name)

      matchingFiles ++ dirs.flatMap(dir => findFilesRecursive(name, dir))
    } else Seq()
  }
          
  def listConfigs(extensionPoint: String): Seq[Config] =
    configs
      .filter(_._1.getString("extends").equalsIgnoreCase(extensionPoint))
      .map(_._1)

  private def readFile(extensionPoint: String, id: String, filename: String)(implicit ctx: ExecutionContext) =
    scala.concurrent.blocking {
      Future {
        configs.find { case (config, dir) => 
          config.getString("extends").equalsIgnoreCase(extensionPoint) &&
          config.getString("id").equalsIgnoreCase(id)
        } map { case (config, dir) => 
          val file = new File(dir, filename)
          Source.fromFile(file).getLines.mkString("\n")
        }
      }
    }  

  def loadPlugin(extensionPoint: String, id: String)(implicit ctx: ExecutionContext): Future[Option[String]] =
    readFile(extensionPoint, id, "plugin.js")

  def loadCSS(extensionPoint: String, id: String)(implicit ctx: ExecutionContext): Future[Option[String]] =
    readFile(extensionPoint, id, "plugin.css")

} 
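For the registry to pick a plugin up, its directory only needs a plugin.conf with an extends and an id key. A self-contained sketch that parses such content from a string rather than from disk; the values are illustrative.

import com.typesafe.config.ConfigFactory

object PluginConfDemo extends App {
  // Content a plugin.conf file might contain.
  val pluginConf = ConfigFactory.parseString(
    """
      |extends = "document.map"
      |id      = "my-basemap-plugin"
    """.stripMargin)

  // Same rendering as the registry's startup log line above.
  println(s"${pluginConf.getString("extends")}.${pluginConf.getString("id")}")
}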
Example 55
Source File: PersonalizedPlaylistBuilder.scala    From Mastering-Spark-for-Data-Science   with MIT License
package io.gzet.recommender

import com.typesafe.config.Config
import io.gzet.recommender.Config._
import org.apache.spark.SparkContext
import org.apache.spark.graphx.Graph
import spark.jobserver._

object PersonalizedPlaylistBuilder extends SparkJob with NamedRddSupport {

  override def runJob(sc: SparkContext, conf: Config): Any = {

    val id = conf.getLong("song.id")

    val edges = this.namedRdds.get[Edge](RDD_EDGE).get
    val nodes = this.namedRdds.get[Node](RDD_NODE).get

    val edgeRDD = edges.flatMap({e =>
      e.targets.zip(e.weights).map({case (target, weight) =>
        org.apache.spark.graphx.Edge(e.source, target.toLong, weight.toDouble)
      })
    })

    val songIdsB = sc.broadcast(nodes.map(n => (n.id, n.name)).collectAsMap())

    val graph = Graph.fromEdges(edgeRDD, 0L)
    graph.cache()
    val prGraph = graph.personalizedPageRank(id, TOLERANCE, TELEPORT)

    prGraph.vertices.mapPartitions({ it =>
      val songIds = songIdsB.value
      it map { case (vId, pr) =>
        (vId, songIds.getOrElse(vId, "UNKNOWN"), pr)
      }
    }).sortBy(_._3, ascending = false).map(v => List(v._1, v._3, v._2).mkString(",")).collect()

  }

  override def validate(sc: SparkContext, config: Config): SparkJobValidation = {
    if(!config.hasPath("song.id")) return SparkJobInvalid("Missing parameter [song.id]")
    if(this.namedRdds.get[Edge](RDD_EDGE).isEmpty) return SparkJobInvalid("Missing RDD [edges]")
    if(this.namedRdds.get[Node](RDD_NODE).isEmpty) return SparkJobInvalid("Missing RDD [nodes]")
    SparkJobValid
  }

} 
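The job's validate step only requires a song.id path, so the job configuration can be a single line. A minimal sketch with an arbitrary id, mirroring the checks made above:

import com.typesafe.config.ConfigFactory

object PlaylistJobConfigDemo extends App {
  val conf = ConfigFactory.parseString("song.id = 42")

  println(conf.hasPath("song.id")) // true
  println(conf.getLong("song.id")) // 42
}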
Example 56
Source File: PlaylistBuilder.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.recommender

import com.datastax.spark.connector._
import com.typesafe.config.Config
import io.gzet.recommender.Config._
import org.apache.spark.SparkContext
import org.apache.spark.graphx.Graph
import spark.jobserver._

object PlaylistBuilder extends SparkJob with NamedRddSupport {

  override def runJob(sc: SparkContext, conf: Config): Any = {

    val recordRDD = sc.cassandraTable[Record](KEYSPACE, TABLE_RECORD)
    val hashRDD = sc.cassandraTable[Hash](KEYSPACE, TABLE_HASH)

    val minSimilarityB = sc.broadcast(MIN_SIMILARITY)
    val songIdsB = sc.broadcast(recordRDD.map(r => (r.id, r.name)).collectAsMap())

    implicit class Crossable[X](xs: Traversable[X]) {
      def cross[Y](ys: Traversable[Y]) = for { x <- xs; y <- ys } yield (x, y)
    }

    val songHashRDD = hashRDD flatMap { hash =>
      hash.songs map { song =>
        ((hash, song), 1)
      }
    }

    val songTfRDD = songHashRDD map { case ((hash, songId), count) =>
      (songId, count)
    } reduceByKey(_+_)

    val songTfB = sc.broadcast(songTfRDD.collectAsMap())

    val crossSongRDD = songHashRDD.keys.groupByKey().values flatMap { songIds =>
      songIds cross songIds filter { case (from, to) =>
        from != to
      } map(_ -> 1)
    } reduceByKey(_+_) map { case ((from, to), count) =>
      val weight = count.toDouble / songTfB.value.getOrElse(from, 1)
      org.apache.spark.graphx.Edge(from, to, weight)
    } filter { edge =>
      edge.attr > minSimilarityB.value
    }

    val graph = Graph.fromEdges(crossSongRDD, 0L)
    val prGraph = graph.pageRank(TOLERANCE, TELEPORT)

    val edges = prGraph.edges.map({ edge =>
      (edge.srcId, (edge.dstId, edge.attr))
    }).groupByKey().map({case (srcId, it) =>
      val dst = it.toList
      val dstIds = dst.map(_._1.toString)
      val weights = dst.map(_._2.toString)
      Edge(srcId, dstIds, weights)
    })

    val vertices = prGraph.vertices.mapPartitions({ vertices =>
      val songIds = songIdsB.value
      vertices map { case (vId, pr) =>
        Node(vId, songIds.getOrElse(vId, "UNKNOWN"), pr)
      }
    })

    edges.saveAsCassandraTable(KEYSPACE, TABLE_EDGE)
    vertices.saveAsCassandraTable(KEYSPACE, TABLE_NODE)

    this.namedRdds.update(RDD_EDGE, edges)
    this.namedRdds.update(RDD_NODE, vertices)

  }

  override def validate(sc: SparkContext, config: Config): SparkJobValidation = {
    SparkJobValid
  }



} 
Example 57
Source File: IndexBuilder.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.recommender

import com.datastax.spark.connector._
import com.typesafe.config.Config
import io.gzet.recommender.Config._
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.spark.SparkContext
import spark.jobserver._

object IndexBuilder extends SparkJob {

  override def runJob(sc: SparkContext, conf: Config): Any = {

    val inputDir = conf.getString("input.dir")

    val sampleSizeB = sc.broadcast(SAMPLE_SIZE)
    val audioSongRDD = AudioLibrary.read(inputDir, sc, MIN_TIME, MAX_TIME)
    val songRDD = audioSongRDD.keys.sortBy(song => song).zipWithIndex().mapValues(l => l + 1)
    val songIdsB = sc.broadcast(songRDD.collectAsMap())

    val audioRDD = audioSongRDD mapPartitions { audios =>
      val songIds = songIdsB.value
      audios map { case (song, audio) =>
        (songIds.get(song).get, audio)
      }
    }

    val sampleRDD = audioRDD flatMap { case (songId, audio) =>
      audio.sampleByTime(sampleSizeB.value) map { sample =>
        (songId, sample)
      }
    }

    val recordRDD = songRDD map { case (name, id) =>
      Record(id, name)
    }

    val hashRDD = sampleRDD.map({case (songId, sample) =>
      ((sample.hash, songId), Array(sample.id))
    }).reduceByKey(_ ++ _).mapValues(a => a.mkString(",")).map({case ((hash, songId), sampleIds) =>
      (hash, songId)
    }).groupByKey().mapValues(it => it.toList).map({case (id, songs) =>
      Hash(id, songs)
    })

    hashRDD.saveAsCassandraTable(KEYSPACE, TABLE_HASH)
    recordRDD.saveAsCassandraTable(KEYSPACE, TABLE_RECORD)

  }

  def containsWav(hdfs: FileSystem, path: Path) = {
    val it = hdfs.listFiles(path, false)
    var i = 0
    while(it.hasNext){
      if(it.next().getPath.getName.endsWith(".wav")){
        i += 1
      }
    }
    i > 0
  }

  override def validate(sc: SparkContext, config: Config): SparkJobValidation = {

    if(!config.hasPath("input.dir")) {
      SparkJobInvalid("Missing parameter [input.dir]")
    } else {
      val hdfs = FileSystem.get(sc.hadoopConfiguration)
      val path = new Path(config.getString("input.dir"))
      val isDir = hdfs.isDirectory(path)
      val isValid = containsWav(hdfs, path)
      hdfs.close()
      if(isDir && isValid) {
        SparkJobValid
      } else {
        SparkJobInvalid("Input directory does not contains .wav files")
      }
    }

  }

} 
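IndexBuilder only reads input.dir from the job config, and validate rejects anything that is not a directory containing .wav files. A minimal config sketch (the HDFS path is a placeholder):

import com.typesafe.config.ConfigFactory

val conf = ConfigFactory.parseString("""input.dir = "hdfs:///data/audio/library"""")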
Example 58
Source File: Main.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.apiServer

import akka.actor.{ ActorRef, ActorSystem }
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregateFlowsImpl, ShardedBankAccountAggregates }
import com.github.j5ik2o.bank.adaptor.controller.Routes
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.generator.IdGenerator
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.typesafe.config.{ Config, ConfigFactory }
import pureconfig._
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

import scala.concurrent.ExecutionContextExecutor

object Main extends App {
  val rootConfig: Config                    = ConfigFactory.load()
  val dbConfig: DatabaseConfig[JdbcProfile] = DatabaseConfig.forConfig[JdbcProfile](path = "slick", rootConfig)

  implicit val system: ActorSystem                        = ActorSystem("bank-system", config = rootConfig)
  implicit val materializer: ActorMaterializer            = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher

  val bankAccountIdGenerator = IdGenerator.ofBankAccountId(dbConfig.profile, dbConfig.db)

  val bankAccountAggregatesRef: ActorRef =
    system.actorOf(ShardedBankAccountAggregates.props, ShardedBankAccountAggregates.name)

  val bankAccountAggregateUseCase: BankAccountAggregateUseCase = new BankAccountAggregateUseCase(
    new BankAccountAggregateFlowsImpl(bankAccountAggregatesRef)
  )

  val bankAccountReadModelUseCase: BankAccountReadModelUseCase =
    new BankAccountReadModelUseCase(new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
                                    new JournalReaderImpl())

  val routes: Routes = Routes(bankAccountIdGenerator, bankAccountAggregateUseCase, bankAccountReadModelUseCase)

  val ApiServerConfig(host, port) =
    loadConfigOrThrow[ApiServerConfig](system.settings.config.getConfig("bank.api-server"))

  val bindingFuture = Http().bindAndHandle(routes.root, host, port)

  sys.addShutdownHook {
    bindingFuture
      .flatMap(_.unbind())
      .onComplete(_ => system.terminate())
  }
} 
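Main above resolves host and port from the bank.api-server path via pureconfig's loadConfigOrThrow. A minimal application.conf fragment satisfying that load might look like the sketch below (values are placeholders):

import com.typesafe.config.ConfigFactory

val apiServerConf = ConfigFactory.parseString(
  """bank.api-server {
    |  host = "0.0.0.0"
    |  port = 8080
    |}""".stripMargin)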
Example 59
Source File: PersistenceSpec.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.adaptor.aggregate

import akka.actor.{ ActorRef, ActorSystem }
import com.github.j5ik2o.bank.adaptor.util.{ ActorSpec, FlywayWithMySQLSpecSupport }
import com.typesafe.config.Config

abstract class PersistenceSpec(system: ActorSystem) extends ActorSpec(system) with FlywayWithMySQLSpecSupport {

  def this(name: String, config: Config) = this(ActorSystem(name, config))

  protected def killActors(actors: ActorRef*): Unit = {
    actors.foreach { actor =>
      watch(actor)
      system.stop(actor)
      expectTerminated(actor)
    }
  }

} 
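A concrete suite would typically pass its own name and a test configuration through the auxiliary constructor shown above; a sketch, with a hypothetical suite name and config override:

import com.typesafe.config.ConfigFactory

class BankAccountAggregateSpec
    extends PersistenceSpec(
      "BankAccountAggregateSpec",
      ConfigFactory.parseString("akka.loglevel = INFO").withFallback(ConfigFactory.load()))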
Example 60
Source File: StatsDReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.jjagged.metrics.reporting.statsd.StatsD
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config

// NOTE: the class header and config-driven fields below are not part of this extract;
// they are reconstructed here and assumed to mirror DogStatsDReporter from the same project.
class StatsDReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter with LoggingAdapter {

  private lazy val reporter = getReporter
  private lazy val statsD = getStatsD

  private[reporting] val statsdHost = config.getString("host")
  private[reporting] val port = config.getInt("port")
  private[reporting] val prefix = config.getString("metric-prefix")
  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers())
  }

  private[reporting] def getReporter(): com.github.jjagged.metrics.reporting.StatsDReporter = {

    log.info("Initializing the StatsD metrics reporter");

    com.github.jjagged.metrics.reporting.StatsDReporter.forRegistry(metrics.metricRegistry)
      .prefixedWith(this.prefix)
      .withTags("{'host':'" + host + "', 'application':'" + application.replace(' ', '-').toLowerCase() + "'}")
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(statsD);

  }

  private[reporting] def getStatsD(): StatsD = {
    new StatsD(statsdHost, port);
  }
} 
Example 61
Source File: DogStatsDReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config
import org.coursera.metrics.datadog.DatadogReporter.Expansion
import org.coursera.metrics.datadog.transport.{Transport, UdpTransport}
import org.coursera.metrics.datadog.DefaultMetricNameFormatter

import scala.collection.JavaConverters._

class DogStatsDReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter with LoggingAdapter {

  private lazy val reporter = getReporter
  private lazy val transport = getTransport

  private[reporting] val dogHost = config.getString("host")
  private[reporting] val port = config.getInt("port")

  private[reporting] val prefix = config.getString("metric-prefix")
  private[reporting] val apiKey = config.getString("api-key")

  private[reporting] val tags = config.getStringList("tags").asScala ++ Seq(
    s"app:${application.replace(" ", "-").toLowerCase}",
    s"version:$version")

  
  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers())
  }

  private[reporting] def getReporter(): org.coursera.metrics.datadog.DatadogReporter = {

    log.info("Initializing the DogStatsD metrics reporter")
    org.coursera.metrics.datadog.DatadogReporter.forRegistry(metrics.metricRegistry)
        .withExpansions(Expansion.ALL)
        .withHost(host)
        .withMetricNameFormatter(new DefaultMetricNameFormatter())
        .withPrefix(prefix)
        .withTags(tags.asJava)
        .withTransport(transport)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
  }

  private[reporting] def getTransport(): Transport = {
    new UdpTransport.Builder().withStatsdHost(dogHost).withPort(port).build()
  }

} 
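For reference, the keys DogStatsDReporter reads above could be supplied by a reporter block like the following sketch (every value is a placeholder):

import com.typesafe.config.ConfigFactory

val dogStatsdConf = ConfigFactory.parseString(
  """host = "localhost"
    |port = 8125
    |metric-prefix = "my-service"
    |api-key = "xxxxxxxx"
    |tags = ["env:dev", "team:platform"]
    |""".stripMargin)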
Example 62
Source File: MetricsReportingManager.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.ConfigurationException
import akka.actor.{Actor, ActorSystem, Props}
import com.github.vonnagy.service.container.health.{GetHealth, HealthInfo, HealthState, RegisteredHealthCheckActor}
import com.github.vonnagy.service.container.log.ActorLoggingAdapter
import com.github.vonnagy.service.container.metrics.Metrics
import com.typesafe.config.{Config, ConfigObject}

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration

object MetricsReportingManager {
  def props(): Props =
    Props(classOf[MetricsReportingManager])
}

// NOTE: the actor's class header and its `reporters` state are not part of this extract; a minimal
// header is reconstructed here (assumed mixins) so the remaining methods have their context. The
// `receive` handler and the logic that schedules and starts each configured reporter are omitted.
class MetricsReportingManager extends Actor with RegisteredHealthCheckActor with ActorLoggingAdapter {

  // Reporters currently managed by this actor.
  private[reporting] var reporters = Seq.empty[ScheduledReporter]
  private[reporting] def stopReporters(): Unit = {
    reporters.foreach(_.stop)
    reporters = Seq.empty[ScheduledReporter]
  }

  private def checkHealth(): HealthInfo = {
    if (reporters.length == 0) {
      HealthInfo("metrics-reporting", HealthState.OK, s"The system is currently not managing any metrics reporters")
    }
    else {
      val x = for {
        reporter <- reporters
      } yield {
        reporter.getClass.getName
      }

      HealthInfo("metrics-reporting", HealthState.OK, s"The system is currently managing ${reporters.length} metrics reporters", Some(x))
    }
  }
} 
Example 63
Source File: Slf4jReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

class Slf4jReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter {

  lazy val reporter = getReporter

  
  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers());
  }

  private[reporting] def getReporter: com.codahale.metrics.Slf4jReporter = {
    com.codahale.metrics.Slf4jReporter.forRegistry(metrics.metricRegistry)
      .outputTo(LoggerFactory.getLogger(config.getString("logger")))
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build
  }
} 
Example 64
Source File: ContainerBuilder.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container

import akka.actor.{ActorSystem, Props}
import com.github.vonnagy.service.container.core.CoreConfig
import com.github.vonnagy.service.container.health.HealthCheck
import com.github.vonnagy.service.container.http.routing.RoutedEndpoints
import com.github.vonnagy.service.container.listener.ContainerLifecycleListener
import com.github.vonnagy.service.container.service.ContainerService
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}


case class ContainerBuilder(
                             endpoints: Seq[Class[_ <: RoutedEndpoints]] = Seq.empty,
                             healthChecks: Seq[HealthCheck] = Seq.empty,
                             props: Seq[(String, Props)] = Seq.empty,
                             listeners: Seq[ContainerLifecycleListener] = Seq.empty,
                             config: Config = ConfigFactory.empty,
                             name: String = "service-container",
                             system: Option[ActorSystem] = None
                           ) extends CoreConfig {

  def withConfig(conf: Config): ContainerBuilder = copy(config = conf)

  def withRoutes(routes: Class[_ <: RoutedEndpoints]*): ContainerBuilder = copy(endpoints = routes)

  def withConfigValue(name: String, value: Any): ContainerBuilder =
    copy(config = this.config.withValue(name, ConfigValueFactory.fromAnyRef(value)))

  def withHealthChecks(checks: HealthCheck*): ContainerBuilder = copy(healthChecks = checks)

  def withActors(actors: (String, Props)*): ContainerBuilder = copy(props = actors)

  def withListeners(obs: ContainerLifecycleListener*): ContainerBuilder = copy(listeners = obs)

  def withActorSystem(sys: ActorSystem): ContainerBuilder = copy(system = Some(sys))

  def withName(name: String): ContainerBuilder = copy(name = name)

  def build: ContainerService = {
    implicit val actorSystem = system.getOrElse(ActorSystem.create(name, getConfig(Some(config))))
    val svc = new ContainerService(endpoints, healthChecks, props, listeners, name) with App
    svc
  }

  def validateConfig(paths: String*) = {
    paths.foreach { path =>
      if (!config.hasPath(path)) {
        throw new MissingConfigException(s"Missing required config property: '$path'.")
      }
    }
  }
}

class MissingConfigException(s: String) extends RuntimeException(s) 
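A hypothetical use of the builder: give the container a name, override a single config value and build the service. The config path used here is illustrative, not one the library necessarily defines:

val containerService = ContainerBuilder()
  .withName("example-service")
  .withConfigValue("example.http.port", 9092) // illustrative path
  .build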
Example 65
Source File: RoutingHandler.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.http.routing

import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server._
import akka.http.scaladsl.settings.RoutingSettings
import com.github.vonnagy.service.container.http.routing.Rejection.{DuplicateRejection, NotFoundRejection}
import com.github.vonnagy.service.container.http.{DefaultMarshallers, RejectionResponse}
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config
import org.json4s.jackson.Serialization

import scala.collection.immutable
import scala.util.control.NonFatal

trait RoutingHandler extends Directives with DefaultMarshallers with LoggingAdapter {

  def conf: Config
  implicit val routeSettings = RoutingSettings(conf)
  implicit val marshaller: ToEntityMarshaller[AnyRef] = jsonMarshaller

  
  implicit val rejectionHandler = new RejectionHandler {
    val orig = RejectionHandler.newBuilder()
      .handle { case NotFoundRejection(errorMsg) => complete(NotFound, errorMsg) }
      .handle { case DuplicateRejection(errorMsg) => complete(BadRequest, errorMsg) }
      .handle { case MalformedRequestContentRejection(errorMsg, _) => complete(UnprocessableEntity, errorMsg) }
      .handleNotFound { complete((NotFound, "The requested resource could not be found.")) }
      .result
      .seal

    def apply(v1: immutable.Seq[Rejection]): Option[Route] = {
      val originalResult = orig(v1).getOrElse(complete(StatusCodes.InternalServerError))

      Some(mapResponse(transformExceptionRejection) {
        originalResult
      })
    }
  }

  private def transformExceptionRejection(response: HttpResponse): HttpResponse = {
    response.entity match {
      // If the entity isn't Strict (and it definitely will be), don't bother
      // converting, just throw an error, because something's weird.
      case strictEntity: HttpEntity.Strict =>
        val rej = RejectionResponse(response.status.intValue, response.status.defaultMessage,
          strictEntity.data.utf8String)

        response.withEntity(HttpEntity(ContentType(MediaTypes.`application/json`),
          Serialization.write(rej)))

      case _ =>
        throw new Exception("Unexpected entity type")
    }
  }

} 
Example 66
Source File: CoreConfig.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.core

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}

trait CoreConfig {

  
  def getConfig(config: Option[Config]): Config = {

    val sysConfig = System.getProperty("config.file") match {
      // If we were not passed a Config then check to see if a config file
      // exists within a conf directory under the application context
      case null if new File("conf/application.conf").exists => ConfigFactory.load("conf/application.conf")
      // Load the default
      case null => ConfigFactory.load()
      // If there is a system property for the file then use that
      case f => ConfigFactory.parseFile(new File(f))
    }

    (config match {
      case Some(conf) => conf.withFallback(sysConfig)
      case None => sysConfig
    }).withFallback(ConfigFactory.load()).resolve()
  }

} 
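The resolution order implemented above is: the caller-supplied Config first, then -Dconfig.file or conf/application.conf, and finally the default classpath configuration as the last fallback. A quick sketch of calling it with an override (path and value are placeholders):

import com.typesafe.config.ConfigFactory

val effective = (new CoreConfig {}).getConfig(Some(ConfigFactory.parseString("example.port = 9000")))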
Example 67
Source File: ModelFeature.scala    From modelmatrix   with Apache License 2.0 5 votes vote down vote up
package com.collective.modelmatrix

import com.collective.modelmatrix.transform.Transform
import com.typesafe.config.Config
import org.apache.spark.sql.catalyst.SqlParser

import scala.util.{Failure, Success, Try}
import scalaz.ValidationNel
import scalaz.syntax.apply._
import scalaz.syntax.validation._

case class ModelFeature(
  active: Boolean,
  group: String,
  feature: String,
  extract: String,
  transform: Transform
)

object ModelFeature {

  // Validate extract expressions using the SqlParser that is used in DataFrame.selectExpr
  private val sqlParser = new SqlParser()

  def parse(feature: String, config: Config, path: String): ValidationNel[String, ModelFeature] = {
    parse(feature, config.getConfig(path))
  }

  def parse(feature: String, config: Config): ValidationNel[String, ModelFeature] = {

    def string(p: String) = parameter(p)(_.getString)

    def expression(p: String) = {
      import scalaz.Validation.FlatMap._
      string(p).flatMap { input =>
        Try(sqlParser.parseExpression(input)) match {
          case Success(parsed) => input.successNel
          case Failure(err)    => s"Failed to parse extract expression: $err".failureNel
        }
      }
    }

    def boolean(p: String) = parameter(p)(_.getBoolean)

    def transform(p: String): ValidationNel[String, Transform] =
      string(p).fold(_.failure, {
        case "identity"  => Transform.identity(config)
        case "top"       => Transform.top(config)
        case "index"     => Transform.index(config)
        case "bins"      => Transform.bins(config)
        case unknown     => s"Unknown transform type: $unknown".failureNel
      })

    def parameter[P](p: String)(f: Config => String => P): ValidationNel[String, P] =
      Try(f(config)(p)) match {
        case Success(s) =>
          s.successNel
        case Failure(err) =>
          s"Can't parse parameter '$p'. Error: ${err.getMessage}".failureNel
      }

    (
      boolean("active")      |@|
      string("group")        |@|
      feature.successNel     |@|
      expression("extract")  |@|
      transform("transform")
    )(ModelFeature.apply)
  }

} 
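ModelFeature.parse above expects each feature object to carry active, group, extract and transform keys; transform-specific parameters would sit alongside them in the same object. A sketch of one feature entry using the identity transform (assumed here to need no extra parameters; the names and expression are hypothetical):

import com.typesafe.config.ConfigFactory

val featureConfig = ConfigFactory.parseString(
  """ad_size {
    |  active = true
    |  group = "advertisement"
    |  extract = "concat(ad_width, 'x', ad_height)"
    |  transform = "identity"
    |}""".stripMargin)

val parsed = ModelFeature.parse("ad_size", featureConfig, "ad_size")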
Example 68
Source File: ModelConfigurationParser.scala    From modelmatrix   with Apache License 2.0 5 votes vote down vote up
package com.collective.modelmatrix

import java.nio.charset.CodingErrorAction
import java.security.MessageDigest
import java.util.function.BiConsumer

import com.typesafe.config.{Config, ConfigValue}

import scala.io.Codec
import scalaz.{Failure, Success, ValidationNel}

class ModelConfigurationParser(config: Config, path: String = "features") {

  type FeatureDefinition = (String, ValidationNel[String, ModelFeature])

  private lazy val configLines: Seq[(String, Int)] = {
    implicit val codec = Codec("UTF-8")
    codec.onMalformedInput(CodingErrorAction.REPLACE)
    codec.onUnmappableCharacter(CodingErrorAction.REPLACE)
    contentLines.zipWithIndex
  }

  // Try to find feature row index in original config if possible
  private def featureIndex(f: String): Int = {
    configLines.find(_._1.contains(f)).map(_._2).getOrElse(0)
  }

  private[this] val originUrl = config.origin().url()

  // configuration file as lines
  lazy val contentLines: Seq[String] = {
    if (originUrl != null) {
      scala.io.Source.fromURL(originUrl).getLines().toSeq
      // ideally this case below should never happen unless the Config passed in argument is not parsed from a file
    } else Seq.empty
  }

  // configuration file as a String
  lazy val content: String = contentLines.mkString(System.lineSeparator())

  // md5sum of the configuration content
  lazy val checksum: String = MessageDigest.getInstance("MD5").digest(content.getBytes).map("%02X".format(_)).mkString

  def features(): Seq[FeatureDefinition] = {
    val builder = collection.mutable.ListBuffer.empty[FeatureDefinition]

    config.getObject(path).forEach(new BiConsumer[String, ConfigValue] {
      def accept(t: String, u: ConfigValue): Unit = {
        val parsedFeature = ModelFeature.parse(t, u.atKey(t), t)
        builder += (t -> parsedFeature)
      }
    })

    builder.toSeq.sortBy {
      case (f, Success(feature)) => (true, featureIndex(feature.feature), feature.group, feature.feature)
      case (f, Failure(_)) => (false, featureIndex(f), "", f)
    }
  }
} 
Example 69
Source File: DatabaseConfig.scala    From modelmatrix   with Apache License 2.0 5 votes vote down vote up
package com.collective.modelmatrix.db

import com.collective.modelmatrix.db.DatabaseConfig.{DatabaseType, H2, PG}
import com.typesafe.config.{Config, ConfigFactory}
import slick.driver.{JdbcDriver, H2Driver, JdbcProfile, PostgresDriver}

object GenericSlickDriver extends JdbcDriver

object DatabaseConfig {

  val driverPath = "driver"
  val urlPath = "url"

  case class DatabaseType(name: String,
                          driverClass: String,
                          slickDriver: JdbcProfile,
                          urlPrefix: String)

  object PG extends DatabaseType("pg", "org.postgresql.Driver", PostgresDriver, "jdbc:postgresql:")

  object H2 extends DatabaseType("h2", "org.h2.Driver", H2Driver, "jdbc:h2:")

}

// database configuration wrapper class that reads the configuration files and
// determines the proper slick driver to use
class DatabaseConfig(configFilePath: String = "") {
  private[this] val dbConfigPath: String = "modelmatrix.catalog.db"

  private[this] lazy val dbConfig: Config =
    if (configFilePath.isEmpty) ConfigFactory.load().getConfig(dbConfigPath)
    else ConfigFactory.systemProperties().withFallback(ConfigFactory.load(configFilePath)).getConfig(dbConfigPath)

  // get the DatabaseType based on the driver name
  private[this] lazy val dbType: DatabaseType =
    (dbConfig.getString(DatabaseConfig.driverPath), dbConfig.getString(DatabaseConfig.urlPath)) match {
      case pg if PG.driverClass == pg._1 && pg._2.startsWith(PG.urlPrefix) => PG
      case h2 if H2.driverClass == h2._1 && h2._2.startsWith(H2.urlPrefix) => H2
      case unknown => sys.error(s"The following db driver '${unknown._1}' with url '${unknown._2}' is not supported")
    }

  lazy val dbUrl = dbConfig.getString(DatabaseConfig.urlPath)

  def slickDriver: JdbcProfile = {
    dbType.slickDriver
  }

  def database(): GenericSlickDriver.api.Database = {
    import GenericSlickDriver.api.Database
    Database.forConfig("", dbConfig)
  }

  def migrationPath: String = {
    "db/migration/%s".format(dbType.name)
  }
}

// default database configuration wrapper that will be used in production
object DefaultDatabaseConfig extends DatabaseConfig 
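The wrapper above keys off the driver/url pair under modelmatrix.catalog.db to pick the matching Slick profile; a minimal H2 block might look like this sketch (the in-memory url is a placeholder):

import com.typesafe.config.ConfigFactory

val catalogConf = ConfigFactory.parseString(
  """modelmatrix.catalog.db {
    |  driver = "org.h2.Driver"
    |  url = "jdbc:h2:mem:modelmatrix;DB_CLOSE_DELAY=-1"
    |}""".stripMargin)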
Example 70
Source File: DatabaseFactory.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.factory

import com.gabry.job.db.access._
import com.gabry.job.db.slicks.SlickDataAccessFactory
import com.gabry.job.utils.ExternalClassHelper._
import com.typesafe.config.{Config, ConfigFactory}

import scala.util.{Failure, Success, Try}

object DatabaseFactory {

  def getDataAccessFactory:Try[DataAccessFactory] =
    getDataAccessFactory(ConfigFactory.load())

  def getDataAccessFactory(config:Config):Try[DataAccessFactory] =
    getDataAccessFactory(config.getStringOr("db.type","slick"),config)

  def getDataAccessFactory(databaseType:String, config:Config):Try[DataAccessFactory] = {
    databaseType.toLowerCase match {
      case "slick" =>
        Success(new SlickDataAccessFactory(config.getConfig(s"db.$databaseType")))
      case "quill" =>
        Failure(new UnsupportedOperationException("quill unsupported database driver! json field not supported ! "))
      case unknownDatabaseType =>
        Failure(new IllegalArgumentException(s"unknown database type $unknownDatabaseType,supported database type is [slick]"))
    }
  }

} 
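getDataAccessFactory defaults db.type to "slick" and then hands the db.slick sub-config to SlickDataAccessFactory, which passes it straight to Slick's Database.forConfig. A sketch of such a block (connection values are placeholders):

import com.typesafe.config.ConfigFactory

val dbConf = ConfigFactory.parseString(
  """db {
    |  type = "slick"
    |  slick {
    |    url = "jdbc:mysql://localhost:3306/lemon_schedule"
    |    driver = "com.mysql.cj.jdbc.Driver"
    |    user = "job"
    |    password = "secret"
    |  }
    |}""".stripMargin)

val factory = DatabaseFactory.getDataAccessFactory(dbConf)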
Example 71
Source File: SlickDataAccessFactory.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import com.gabry.job.db.access._
import com.typesafe.config.Config
import slick.jdbc.MySQLProfile.api._

class SlickDataAccessFactory(config:Config) extends DataAccessFactory{

  private final var database:Database = _
  private final var jobAccess:JobAccess = _
  private final var dependencyAccess:DependencyAccess = _
  private final var scheduleAccess:ScheduleAccess = _
  private final var taskAccess:TaskAccess = _

  override def getJobAccess: JobAccess = jobAccess

  override def getDependencyAccess: DependencyAccess = dependencyAccess

  override def getScheduleAccess: ScheduleAccess = scheduleAccess

  override def getTaskAccess: TaskAccess = taskAccess

  override def init(): Unit = {
    database = Database.forConfig("",config)
    jobAccess = new SlickJobAccess(database)
    dependencyAccess = new SlickDependencyAccess(database)
    scheduleAccess = new SlickScheduleAccess(database)
    taskAccess = new SlickTaskAccess(database)
  }

  override def destroy(): Unit = {
    // Don't check whether `database` is null; don't ask why.
    database.close()
    database = null
    jobAccess = null
    dependencyAccess = null
    scheduleAccess = null
    taskAccess = null
  }
} 
Example 72
Source File: RegistryFactory.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.core.registry

import com.gabry.job.core.registry.zookeeper.ZookeeperRegistry
import com.typesafe.config.Config

import scala.util.Try

// NOTE: the enclosing object declaration is not part of this extract; it is restored here based on
// the file name (RegistryFactory.scala) and the imports used by other examples in this document.
object RegistryFactory {
  def getRegistry(config:Config):Try[AbstractRegistry] = Try{
      val registryType = config.getString("registry.type")
      val registryConfig = config.getConfig(s"registry.$registryType")
      registryType.toLowerCase() match {
          case "zookeeper" =>
            new ZookeeperRegistry(registryType,registryConfig)
          case otherType =>
            throw new IllegalArgumentException(s"unsupported registry type $otherType")
      }
  }
} 
Example 73
Source File: TZookeeperRegistry.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.test

import com.gabry.job.core.domain.Node
import com.gabry.job.core.registry.RegistryEvent.RegistryEvent
import com.gabry.job.core.registry.{AbstractRegistry, RegistryFactory, RegistryListener}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{BeforeAndAfterAll, FunSuite}


      override def onEvent(node: Node, event: RegistryEvent): Unit = {
        assert(regNode == node)
      }
    })
    registry.registerNode(regNode)
    Thread.sleep(1000)
  }
  def zkConfigStr = """registry{
                   |  type = "zookeeper"
                   |  zookeeper{
                   |    hosts = "dn1:2181,dn3:2181,dn4:2181"
                   |    exponential-backoff-retry {
                   |      base-sleep-timeMs = 1000
                   |      max-retries = 3
                   |    }
                   |    root-path = "/lemon-schedule"
                   |  }
                   |}
                   """.stripMargin
} 
Example 74
Source File: AkkaJobClient.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.client.akkaclient

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.gabry.job.client.AbstractJobClient
import com.gabry.job.core.command.JobClientCommand
import com.gabry.job.core.domain.{Dependency, Job}
import com.gabry.job.core.event.{FailedEvent, JobTrackerEvent}
import com.gabry.job.core.registry.{Registry, RegistryFactory}
import com.typesafe.config.Config

import scala.concurrent.ExecutionContextExecutor
import scala.util.{Failure, Success}

  override def cancelJob(jobId: Long,force:Boolean): Unit = {
    clientActor ? JobClientCommand.CancelJob(jobId,force) onComplete{
      case Success(_) =>
        println("作业取消成功")
      case Failure(reason) =>
        reason.printStackTrace()
        println(s"作业取消失败 ${reason.getMessage}")
    }
  }
} 
Example 75
Source File: SocketConfig.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import com.typesafe.config.Config
import play.api.libs.json.Json

case class SocketConfig (
  stdin_port: Int,
  control_port: Int,
  hb_port: Int,
  shell_port: Int,
  iopub_port: Int,
  ip : String,
  transport: String,
  signature_scheme: String,
  key: String
)

object SocketConfig {
  implicit val socketConfigReads = Json.reads[SocketConfig]
  implicit val socketConfigWrites = Json.writes[SocketConfig]

  def fromConfig(config: Config) = {
    new SocketConfig(
      config.getInt("stdin_port"),
      config.getInt("control_port"),
      config.getInt("hb_port"),
      config.getInt("shell_port"),
      config.getInt("iopub_port"),
      config.getString("ip"),
      config.getString("transport"),
      config.getString("signature_scheme"),
      config.getString("key")
    )
  }
} 
Example 76
Source File: ClientBootstrap.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.boot

import akka.actor.ActorSystem
import org.apache.toree.comm.{CommRegistrar, CommStorage}
import org.apache.toree.kernel.protocol.v5.client.boot.layers._
import org.apache.toree.kernel.protocol.v5.client.socket.{SocketConfig, SocketFactory}
import org.apache.toree.kernel.protocol.v5.client.{SimpleActorLoader, SparkKernelClient}
import org.apache.toree.utils.LogLike
import com.typesafe.config.Config
import org.zeromq.ZMQ

object ClientBootstrap {

  // NOTE: in the full source, createClient is defined on `class ClientBootstrap(config: Config)`,
  // whose declaration is not shown in this extract; `config` below refers to that constructor
  // parameter, and initializeSystem/initializeHandlers come from the boot-layer traits mixed in at
  // instantiation (see the SparkKernelClientDeployer example later in this document).
  def createClient(
    actorSystemName: String = ClientBootstrap.newActorSystemName()
  ): SparkKernelClient = {
    logger.trace(s"Creating new kernel client actor system, '$actorSystemName'")
    val actorSystem = ActorSystem(actorSystemName)

    logger.trace(s"Creating actor loader for actor system, '$actorSystemName'")
    val actorLoader = SimpleActorLoader(actorSystem)

    logger.trace(s"Creating socket factory for actor system, '$actorSystemName")
    val socketFactory = new SocketFactory(SocketConfig.fromConfig(config))

    logger.trace(s"Initializing underlying system for, '$actorSystemName'")
    val (_, _, _, _, commRegistrar, _) =
      initializeSystem(config, actorSystem, actorLoader, socketFactory)

    logger.trace(s"Initializing handlers for, '$actorSystemName'")
    initializeHandlers(actorSystem, actorLoader)

    logger.trace(s"ZeroMQ version: ${ZMQ.getVersionString}")

    new SparkKernelClient(actorLoader, actorSystem, commRegistrar)
  }
} 
Example 77
Source File: AddJar.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.magic.builtin

import java.io.{File, PrintStream}
import java.net.{URL, URI}
import java.nio.file.{Files, Paths}
import java.util.zip.ZipFile
import org.apache.toree.magic._
import org.apache.toree.magic.builtin.AddJar._
import org.apache.toree.magic.dependencies._
import org.apache.toree.utils.{ArgumentParsingSupport, DownloadSupport, LogLike, FileUtils}
import com.typesafe.config.Config
import org.apache.hadoop.fs.Path
import org.apache.toree.plugins.annotations.Event

object AddJar {
  val HADOOP_FS_SCHEMES = Set("hdfs", "s3", "s3n", "file")

  private var jarDir:Option[String] = None

  def getJarDir(config: Config): String = {
    jarDir.getOrElse({
      jarDir = Some(
        if(config.hasPath("jar_dir") && Files.exists(Paths.get(config.getString("jar_dir")))) {
          config.getString("jar_dir")
        } else {
          FileUtils.createManagedTempDirectory("toree_add_jars").getAbsolutePath
        }
      )
      jarDir.get
    })
  }
}

class AddJar
  extends LineMagic with IncludeInterpreter
  with IncludeOutputStream with DownloadSupport with ArgumentParsingSupport
  with IncludeKernel with IncludePluginManager with IncludeConfig with LogLike
{
  // Option to mark re-downloading of jars
  private val _force =
    parser.accepts("f", "forces re-download of specified jar")

  // Option to load the jar as a magic extension
  private val _magic =
    parser.accepts("magic", "loads jar as a magic extension")

  // Lazy because the outputStream is not provided at construction
  private def printStream = new PrintStream(outputStream)

  )
      } else {
        downloadFile(
          new URL(jarRemoteLocation),
          new File(downloadLocation).toURI.toURL
        )
      }

      // Report download finished
      printStream.println(s"Finished download of $jarName")
    } else {
      printStream.println(s"Using cached version of $jarName")
    }

    // validate jar file
    if(! isValidJar(fileDownloadLocation)) {
      throw new IllegalArgumentException(s"Jar '$jarName' is not valid.")
    }

    if (_magic) {
      val plugins = pluginManager.loadPlugins(fileDownloadLocation)
      pluginManager.initializePlugins(plugins)
    } else {
      kernel.addJars(fileDownloadLocation.toURI)
    }
  }
} 
Example 78
Source File: SocketConfig.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.kernel.socket

import com.typesafe.config.Config
import play.api.libs.json.Json

case class SocketConfig (
  stdin_port: Int,
  control_port: Int,
  hb_port: Int,
  shell_port: Int,
  iopub_port: Int,
  ip : String,
  transport: String,
  signature_scheme: String,
  key: String
)

object SocketConfig {
  implicit val socketConfigReads = Json.reads[SocketConfig]
  implicit val socketConfigWrites = Json.writes[SocketConfig]

  def fromConfig(config: Config) = {
    new SocketConfig(
      config.getInt("stdin_port"),
      config.getInt("control_port"),
      config.getInt("hb_port"),
      config.getInt("shell_port"),
      config.getInt("iopub_port"),
      config.getString("ip"),
      config.getString("transport"),
      config.getString("signature_scheme"),
      config.getString("key")
    )
  }
} 
Example 79
Source File: FactoryMethods.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.api

import java.io.{InputStream, OutputStream}

import org.apache.toree.kernel.protocol.v5
import org.apache.toree.kernel.protocol.v5.{KMBuilder, KernelMessage}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.stream.{KernelOutputStream, KernelInputStream}
import com.typesafe.config.Config


  override def newKernelOutputStream(
    streamType: String = KernelOutputStream.DefaultStreamType,
    sendEmptyOutput: Boolean = config.getBoolean("send_empty_output")
  ): OutputStream = {
    new v5.stream.KernelOutputStream(
      actorLoader,
      kmBuilder,
      org.apache.toree.global.ScheduledTaskManager.instance,
      streamType = streamType,
      sendEmptyOutput = sendEmptyOutput
    )
  }
} 
Example 80
Source File: InterpreterManager.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.boot.layer

import org.apache.toree.kernel.api.KernelLike
import com.typesafe.config.Config
import org.apache.toree.interpreter._
import org.apache.toree.kernel.interpreter.scala.ScalaInterpreter

import scala.collection.JavaConverters._
import org.slf4j.LoggerFactory

case class InterpreterManager(
  default: String = "Scala",
  interpreters: Map[String, Interpreter] = Map[String, Interpreter]()
) {

  //Scala Interpreter is handled separately
  def initializeRegularInterpreters(kernel: KernelLike): Unit = interpreters
    .filterNot { case (name, interp) => name == "Scala" && interp.isInstanceOf[ScalaInterpreter] }
    .foreach { case (_, interpreter) => interpreter.init(kernel) }

  def initializeInterpreters(kernel: KernelLike): Unit = {
    interpreters.values.foreach(interpreter =>
      interpreter.init(kernel)
    )
  }

  def addInterpreter(
    name:String,
    interpreter: Interpreter
  ): InterpreterManager = {
    copy(interpreters = interpreters + (name -> interpreter))
  }

  def defaultInterpreter: Option[Interpreter] = {
    interpreters.get(default)
  }

  
  private def instantiate(className:String, config:Config):Interpreter = {
    try {
      Class
        .forName(className)
        .getConstructor(Class.forName("com.typesafe.config.Config"))
        .newInstance(config).asInstanceOf[Interpreter]
    }
    catch {
      case e: NoSuchMethodException =>
        logger.debug("Using default constructor for class " + className)
        Class
          .forName(className)
          .newInstance().asInstanceOf[Interpreter]
    }

  }

} 
Example 81
Source File: SparkKernelClientDeployer.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package test.utils.root

import org.apache.toree.kernel.protocol.v5.client.boot.ClientBootstrap
import org.apache.toree.kernel.protocol.v5.client.boot.layers._
import com.typesafe.config.{ConfigFactory, Config}


object SparkKernelClientDeployer {
   lazy val startInstance = {
     val profileJSON: String = """
     {
         "stdin_port":   48691,
         "control_port": 40544,
         "hb_port":      43462,
         "shell_port":   44808,
         "iopub_port":   49691,
         "ip": "127.0.0.1",
         "transport": "tcp",
         "signature_scheme": "hmac-sha256",
         "key": ""
     }
                               """.stripMargin
     val config: Config = ConfigFactory.parseString(profileJSON)
     (new ClientBootstrap(config)
       with StandardSystemInitialization
       with StandardHandlerInitialization).createClient()
   }
 } 
Example 82
Source File: MassSettings.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass

import akka.actor.Address
import akka.actor.typed.ActorSystem
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import helloscala.common.Configuration
import mass.core.Constants._

final class Compiles(c: Configuration) {
  def scala213Home: String = c.getString("scala213")
  def scala212Home: String = c.getString("scala212")
  def scala211Home: String = c.getString("scala211")
}

final class MassSettings private (val configuration: Configuration) extends StrictLogging {
  val compiles = new Compiles(configuration.getConfiguration(s"$BASE_CONF.core.compiles"))

  def clusterName: String = configuration.getString(BASE_CONF + ".cluster.name")

  def clusterProtocol: String = configuration.getString(BASE_CONF + ".cluster.protocol")

  def clusterSeeds: List[Address] =
    configuration
      .get[Seq[String]](BASE_CONF + ".cluster.seeds")
      .map { seed =>
        val Array(host, port) = seed.split(':')
        Address(clusterProtocol, clusterName, host, port.toInt)
      }
      .toList
}

object MassSettings {
  def apply(configuration: Configuration): MassSettings = new MassSettings(configuration)
  def apply(config: Config): MassSettings = apply(Configuration(config))
  def apply(system: ActorSystem[_]): MassSettings = apply(system.settings.config)
} 
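clusterSeeds above expects the seeds as host:port strings under the cluster block of the project's base path (BASE_CONF, assumed here to resolve to "mass"); a sketch with placeholder addresses:

import com.typesafe.config.ConfigFactory

val clusterConf = ConfigFactory.parseString(
  """mass.cluster {
    |  name = "mass"
    |  protocol = "akka"
    |  seeds = ["127.0.0.1:2551", "127.0.0.1:2552"]
    |}""".stripMargin)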
Example 83
Source File: Mass.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass

import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, Props }
import akka.{ actor => classic }
import com.typesafe.config.Config
import fusion.common.config.FusionConfigFactory
import fusion.common.{ ReceptionistFactory, SpawnFactory }
import fusion.core.extension.FusionCore
import helloscala.common.Configuration
import mass.core.Constants

import scala.concurrent.ExecutionContext

final class Mass private (val classicSystem: classic.ActorSystem) extends SpawnFactory with ReceptionistFactory {
  implicit def executionContext: ExecutionContext = classicSystem.dispatcher

  val configuration: Configuration = FusionCore(classicSystem).configuration

  override def typedSystem: ActorSystem[_] = classicSystem.toTyped

  override def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] =
    classicSystem.spawnAnonymous(behavior, props)

  override def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] =
    classicSystem.spawn(behavior, name, props)
}

object Mass {
  def fromMergedConfig(config: Config): Mass =
    fromActorSystem(classic.ActorSystem(Constants.MASS, config))

  private[mass] def fromActorSystem(system: classic.ActorSystem): Mass = new Mass(system)

  def fromConfig(originalConfig: Config): Mass = {
    val config = FusionConfigFactory.arrangeConfig(originalConfig, Constants.MASS, Seq("akka"))
    fromMergedConfig(config)
  }
} 
Example 84
Source File: MassApplication.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.core.ext

import akka.actor.ActorSystem
import com.typesafe.config.Config
import fusion.common.config.FusionConfigFactory
import fusion.common.constant.FusionConstants
import mass.Mass

class MassApplication(val classicSystem: ActorSystem) {
  def this(config: Config) =
    this(MassApplication.createActorSystem(FusionConfigFactory.arrangeConfig(config, FusionConstants.FUSION)))

  val mass: Mass = Mass.fromActorSystem(classicSystem)
}

object MassApplication {
  def createActorSystem(config: Config): ActorSystem = {
    val name = config.getString("fusion.akka-name")
    ActorSystem(name, config)
  }
} 
Example 85
Source File: SafeConfigReader.scala    From core   with Apache License 2.0 5 votes vote down vote up
package com.smartbackpackerapp.config

import com.typesafe.config.Config
import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}

class SafeConfigReader(config: Config) {

  private val log = LoggerFactory.getLogger(getClass)

  private def safeRead[A](f: String => A)(key: String): Option[A] =
    Try(f(key)) match {
      case Failure(error) =>
        log.warn(s"Key $key not found: ${error.getMessage}.")
        None
      case Success(value) =>
        Some(value)
    }

  def string(key: String): Option[String] = safeRead[String](config.getString)(key)

  def list(key: String): List[String] = {
    import scala.collection.JavaConverters._
    safeRead[java.util.List[String]](config.getStringList)(key).toList.flatMap(_.asScala)
  }

  def objectKeyList(key: String): List[String] = {
    import scala.collection.JavaConverters._
    Try {
      config.getAnyRef(key)
        .asInstanceOf[java.util.Map[String, java.util.List[String]]]
        .asScala.keys.toList
    } match {
      case Failure(error) =>
        log.warn(s"Key $key not found: ${error.getMessage}.")
        List.empty[String]
      case Success(values) =>
        values
    }
  }

  def objectMap(key: String): Map[String, String] = {
    import scala.collection.JavaConverters._
    Try {
      config.getAnyRef(key)
        .asInstanceOf[java.util.Map[String, String]]
        .asScala
        .toMap
    } match {
      case Failure(error) =>
        log.warn(s"Key $key not found: ${error.getMessage}.")
        Map.empty[String, String]
      case Success(map) => map
    }
  }

  def objectMapOfList(key: String): Map[String, List[String]] = {
    import scala.collection.JavaConverters._
    Try {
      config.getAnyRef(key)
        .asInstanceOf[java.util.Map[String, java.util.List[String]]]
        .asScala
        .map(kv => (kv._1, kv._2.asScala.toList))
        .toMap
    } match {
      case Failure(error) =>
        log.warn(s"Key $key not found: ${error.getMessage}.")
        Map.empty[String, List[String]]
      case Success(map) => map
    }
  }

} 
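A quick illustration of the reader's failure mode: missing keys are logged and turned into None or an empty collection instead of throwing (the keys below are made up):

import com.typesafe.config.ConfigFactory

val reader = new SafeConfigReader(ConfigFactory.parseString("""countries = ["AR", "BR"]"""))
reader.string("api.token")   // None: key is absent, only a warning is logged
reader.list("countries")     // List("AR", "BR")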
Example 86
Source File: TargetConfigParser.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.config

import com.typesafe.config.Config
import configs.Result
import configs.syntax._

object TargetConfigParser {
  def apply(config: Config): Result[TargetConfig] =
    (
      common(config) ~
      local(config) ~
      aws(config)
    )(TargetConfig)

  def common(config: Config): Result[CommonConfig] =
    (
      config.get[String]("hadoop.conf.dir") ~
      config.get[List[String]]("hive.aux.jars") ~
      config.get[String]("hadoop.version") ~
      config.get[String]("base.os")
    )(CommonConfig)

  def local(config: Config): Result[LocalConfig] =
    (
      config.get[String]("local.cluster.user") ~
      config.get[List[String]]("local.ports") ~
      config.get[List[String]]("local.docker.files") ~
      config.get[String]("local.docker.containerId")
    )(LocalConfig)

  def aws(config: Config): Result[AWSConfig] =
    (
      config.get[String]("aws.access.id") ~
      config.get[String]("aws.access.key") ~
      config.get[String]("aws.instance.type") ~
      config.get[Config]("aws.volume.spec").flatMap(volumeSpec) ~
      config.get[Boolean]("aws.auto.volumes") ~
      config.get[String]("aws.user") ~
      config.get[String]("aws.region") ~
      config.get[String]("aws.subnet") ~
      config.get[String]("aws.security.group") ~
      config.get[String]("aws.key.pair") ~
      config.get[String]("aws.key.file") ~
      config.get[String]("aws.base.image.id") ~
      config.get[String]("s3.bucket.prefix")
    )(AWSConfig)

  def volumeSpec(config: Config): Result[VolumeSpec] =
    (
      config.get[List[Config]]("master").flatMap(c => Result.sequence(c.map(volume))) ~
      config.get[List[Config]]("slave").flatMap(c => Result.sequence(c.map(volume)))
    )(VolumeSpec)

  def volume(config: Config): Result[Volume] =
    (
      config.get[String]("name") ~
      config.get[Int]("size")
    )(Volume)
} 
Example 87
Source File: SourceConfigParser.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.config

import com.typesafe.config.Config
import configs.Result
import configs.Result.Success
import configs.syntax._

object SourceConfigParser {

  def apply(config: Config): Result[SourceConfig] = (
    config.get[String]("address") ~
    tables(config) ~
    copy(config) ~
    config.get[String]("s3.hdfs.scheme") ~
    config.get[Double]("default.sample.prob") ~
    config.get[Int]("default.partition.count") ~
    config.get[Option[Long]]("default.sample.size") ~
    config.get[String]("sample.database") ~
    parallelism(config) ~
    gateway(config)
  )(SourceConfig)

  def tables(config: Config): Result[List[TableConfig]] =
    config.get[List[Config]]("tables").flatMap(ts => Result.sequence(ts.map(table)))

  def table(config: Config): Result[TableConfig] = (
    config.get[String]("name") ~
    config.get[Option[Double]]("sample.prob") ~
    config.get[Option[Long]]("sample.size") ~
    config.get[List[List[String]]]("partitions").orElse(Success(List.empty)) ~
    config.get[Option[Int]]("partition.count") ~
    config.get[Boolean]("skip.cleanup").orElse(Success(false))
  )(TableConfig)

  def copy(config: Config) = (
    config.get[String]("copy.scheme") ~
    config.get[Long]("copy.sample.threshold") ~
    config.get[Boolean]("copy.overwriteIfExists") ~
    config.get[List[String]]("copy.listeners").orElse(Success(List.empty)) ~
    config.get[List[String]]("sample.listeners").orElse(Success(List.empty))
  )(CopyConfig)

  def gateway(config: Config) = (
    config.get[List[String]]("gateway.docker.files") ~
    config.get[List[String]]("gateway.docker.ports") ~
    config.get[Config]("gateway").map(mapify(_) -- List("docker.files", "docker.ports"))
  )(GatewayConfig)

  def parallelism(config: Config) = (
    config.get[Int]("parallelism.table") ~
    config.get[Int]("parallelism.partition")
  )(ParallelismConfig)
} 
Example 88
Source File: CheckpointWriter.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.config

import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions, ConfigValueFactory}

import collection.JavaConverters._

object CheckpointWriter {
  def apply(checkpoint: Checkpoint): Config = {
    import checkpoint._
    ConfigFactory.empty
      .withValue("created", ConfigValueFactory.fromAnyRef(created.toEpochMilli))
      .withValue("updated", ConfigValueFactory.fromAnyRef(updated.toEpochMilli))
      .withValue("todo", ConfigValueFactory.fromIterable(todo.asJava))
      .withValue("finished", ConfigValueFactory.fromIterable(finished.asJava))
      .withValue("failed", ConfigValueFactory.fromIterable(failed.asJava))
      .withValue("invalid", ConfigValueFactory.fromIterable(invalid.asJava))
  }

  def render(checkpoint: Checkpoint): String = {
    apply(checkpoint).root.render(ConfigRenderOptions.defaults().setJson(false).setOriginComments(false))
  }
} 
Example 89
Source File: ConfigLoader.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.config

import java.net.URL

import com.criteo.dev.cluster.Public
import com.typesafe.config.{Config, ConfigFactory}
import configs.Result
import configs.Result.Success
import configs.syntax._

@Public
object ConfigLoader {
  def apply(
             source: URL,
             target: URL,
             checkpoint: Option[URL]
           ): Result[GlobalConfig] = apply(
    ConfigFactory.parseURL(source).resolve(),
    ConfigFactory.parseURL(target).resolve(),
    checkpoint.map(ConfigFactory.parseURL(_).resolve)
  )

  def apply(
             source: Config,
             target: Config,
             checkpoint: Option[Config]
           ): Result[GlobalConfig] = (
    SourceConfigParser(source) ~
    TargetConfigParser(target) ~
    checkpoint.fold(Success(None): Result[Option[Checkpoint]])(CheckpointParser.apply(_).map(Some(_)))
  )(GlobalConfig(_,_,_)).map(_.withBackCompat)
} 
Example 90
Source File: CheckpointParser.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.config

import java.time.Instant

import com.typesafe.config.Config
import configs.Result
import configs.syntax._

object CheckpointParser {
  def apply(config: Config): Result[Checkpoint] =
    (
      config.get[Long]("created").map(Instant.ofEpochMilli(_)) ~
      config.get[Long]("updated").map(Instant.ofEpochMilli(_)) ~
      config.get[Set[String]]("todo") ~
      config.get[Set[String]]("finished") ~
      config.get[Set[String]]("failed") ~
      config.get[Set[String]]("invalid")
    )(Checkpoint)
} 
Example 91
Source File: Settings.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.state

import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._

case class Settings(
    networkConfigFile: String,
    aliasesFile: String,
    restTxsFile: String,
    blocksFile: String,
    accountsFile: String,
    assetsFile: String,
    dataFile: String
)

object Settings {
  def fromConfig(config: Config): Settings = {
    import net.ceedubs.ficus.readers.ArbitraryTypeReader._
    config.as[Settings]("waves.benchmark.state")
  }
} 
Example 92
Source File: FicusImplicits.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.generator.config

import com.google.common.base.CaseFormat
import com.typesafe.config.{Config, ConfigRenderOptions}
import com.wavesplatform.generator.Worker
import com.wavesplatform.state.DataEntry
import com.wavesplatform.transaction.{TransactionParser, TransactionParsers}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.{CollectionReaders, ValueReader}
import play.api.libs.json._

import scala.concurrent.duration.{Duration, FiniteDuration}

trait FicusImplicits {

  private[this] val byName: Map[String, TransactionParser] = TransactionParsers.all.map {
    case (_, builder) => builder.getClass.getSimpleName.replaceAll("\\$$", "") -> builder
  }

  private def by(name: String): Option[TransactionParser] = byName.get(name)

  implicit val distributionsReader: ValueReader[Map[TransactionParser, Double]] = {
    val converter                                = CaseFormat.LOWER_HYPHEN.converterTo(CaseFormat.UPPER_CAMEL)
    def toTxType(key: String): TransactionParser = by(converter.convert(key)).get

    CollectionReaders.mapValueReader[Double].map { xs =>
      xs.map {
        case (k, v) =>
          toTxType(k) -> v
      }
    }
  }

  implicit val dataEntryReader: ValueReader[DataEntry[_]] = (config: Config, path: String) =>
    Json.parse(config.getConfig(path).root().render(ConfigRenderOptions.concise())).as[DataEntry[_]]

  implicit val workerSettingsReader: ValueReader[Worker.Settings] = (config: Config, path: String) => {
    def readWaitUtxOrDelay(path: String, default: FiniteDuration): Either[FiniteDuration, FiniteDuration] =
      if (config.hasPath(path)) {
        val value = config.as[String](path)
        if (value == "empty-utx") Right(default)
        else {
          val duration: Duration = Duration(value)
          Left(FiniteDuration(duration.length, duration.unit))
        }
      } else Right(default)

    val utxLimit         = config.as[Int](s"$path.utx-limit")
    val delay            = config.as[FiniteDuration](s"$path.delay")
    val tailInitialDelay = readWaitUtxOrDelay(s"$path.tail-initial-delay", delay)
    val initialDelay     = readWaitUtxOrDelay(s"$path.initial-delay", delay)
    val workingTime      = config.as[FiniteDuration](s"$path.working-time")
    val autoReconnect    = config.as[Boolean](s"$path.auto-reconnect")
    val reconnectDelay   = config.as[FiniteDuration](s"$path.reconnect-delay")

    def readWarmUp(warmUpConfig: Config): Worker.WarmUp = {
      val warmUpStart    = warmUpConfig.as[Int](s"start")
      val warmUpEnd      = warmUpConfig.as[Option[Int]](s"end").getOrElse(utxLimit)
      val warmUpStep     = warmUpConfig.as[Int](s"step")
      val warmUpDuration = warmUpConfig.as[Option[FiniteDuration]](s"duration")
      val warmUpOnce     = warmUpConfig.as[Option[Boolean]](s"once").getOrElse(true)
      Worker.WarmUp(warmUpStart, warmUpEnd, warmUpStep, warmUpDuration, warmUpOnce)
    }

    val warmUp     = readWarmUp(config.getConfig(s"$path.warm-up"))
    val initWarmUp = if (config.hasPath(s"$path.initial-warm-up")) Some(readWarmUp(config.getConfig(s"$path.initial-warm-up"))) else None

    Worker.Settings(utxLimit, delay, tailInitialDelay, initialDelay, workingTime, autoReconnect, reconnectDelay, warmUp, initWarmUp)
  }
} 
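workerSettingsReader above pulls its values from a path-scoped block; a sketch of such a block (all numbers and durations are placeholders, and initial-delay uses the special empty-utx marker handled by readWaitUtxOrDelay):

import com.typesafe.config.ConfigFactory

val workerConf = ConfigFactory.parseString(
  """worker {
    |  utx-limit = 100
    |  delay = 5s
    |  initial-delay = empty-utx
    |  working-time = 10m
    |  auto-reconnect = yes
    |  reconnect-delay = 3s
    |  warm-up {
    |    start = 10
    |    step = 10
    |  }
    |}""".stripMargin)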
Example 93
Source File: NodeConfigs.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.typesafe.config.{Config, ConfigFactory}

import scala.jdk.CollectionConverters._
import scala.util.Random

object NodeConfigs {

  private val NonConflictingNodes = Set(1, 4, 6, 7)

  val Default: Seq[Config] = ConfigFactory.parseResources("nodes.conf").getConfigList("nodes").asScala.toSeq
  val Miners: Seq[Config]  = Default.init
  val NotMiner: Config     = Default.last
  def randomMiner: Config  = Random.shuffle(Miners).head

  def newBuilder: Builder = Builder(Default, Default.size, Seq.empty)

  case class Builder(baseConfigs: Seq[Config], defaultEntities: Int, specialsConfigs: Seq[Config]) {
    def overrideBase(f: Templates.type => String): Builder = {
      val priorityConfig = ConfigFactory.parseString(f(Templates))
      copy(baseConfigs = this.baseConfigs.map(priorityConfig.withFallback))
    }

    def withDefault(entitiesNumber: Int): Builder = copy(defaultEntities = entitiesNumber)

    def withSpecial(f: Templates.type => String): Builder = withSpecial(1, f)

    def withSpecial(entitiesNumber: Int, f: Templates.type => String): Builder = {
      val newSpecialConfig = ConfigFactory.parseString(f(Templates))
      copy(specialsConfigs = this.specialsConfigs ++ (1 to entitiesNumber).map(_ => newSpecialConfig))
    }

    def build(shuffleNodes: Boolean = true): Seq[Config] = {
      val totalEntities = defaultEntities + specialsConfigs.size
      require(totalEntities < baseConfigs.size)

      val baseConfigsShuffled = if (shuffleNodes) Random.shuffle(baseConfigs) else baseConfigs
      val (defaultNodes: Seq[Config], specialNodes: Seq[Config]) = baseConfigsShuffled
        .take(totalEntities)
        .splitAt(defaultEntities)

      specialNodes
        .zip(specialsConfigs)
        .foldLeft(defaultNodes) { case (r, (base, special)) => r :+ special.withFallback(base) }
    }

    // To eliminate a race of miners
    def buildNonConflicting(): Seq[Config] = {
      val totalEntities = defaultEntities + specialsConfigs.size
      require(totalEntities <= NonConflictingNodes.size)

      val (defaultNodes: Seq[Config], specialNodes: Seq[Config]) = baseConfigs.zipWithIndex
        .collect { case (x, i) if NonConflictingNodes.contains(i + 1) => x }
        .splitAt(defaultEntities)

      specialNodes
        .zip(specialsConfigs)
        .foldLeft(defaultNodes) { case (r, (base, special)) => r :+ special.withFallback(base) }
    }
  }

  object Templates {
    def raw(x: String): String = x
    def quorum(n: Int): String = s"waves.miner.quorum = $n"
    def preactivatedFeatures(f: (Int, Int)*): String = {
      s"""
         |waves.blockchain.custom.functionality.pre-activated-features {
         ${f.map {case (id, height) => s"|  $id = $height"}.mkString("\n")}
         |}""".stripMargin
    }
    def minAssetInfoUpdateInterval(blocks: Int): String =
      s"waves.blockchain.custom.functionality.min-asset-info-update-interval = $blocks"

    val nonMiner: String = "waves.miner.enable = no"
  }

} 
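
The integration suites in the following examples build their node configurations through this Builder; a representative sketch of the call chain, reusing the Templates helpers defined above:

import com.typesafe.config.Config
import com.wavesplatform.it.NodeConfigs

// One mining node plus one non-mining node, with quorum lowered so a single miner can forge.
val configs: Seq[Config] =
  NodeConfigs.newBuilder
    .overrideBase(_.quorum(0))
    .withDefault(1)
    .withSpecial(_.nonMiner)
    .buildNonConflicting()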
Example 94
Source File: BaseSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.transactions.NodesFromDocker
import monix.eval.Coeval
import org.scalatest._

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext

class BaseSuite
    extends FreeSpec
    with ReportingTestName
    with NodesFromDocker
    with Matchers
    with CancelAfterFailure
    with BeforeAndAfterAll
    with BeforeAndAfterEach {
  protected implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()

  def miner: Node            = nodes.head
  def notMiner: Node         = nodes.last
  protected def sender: Node = miner

  // protected because https://github.com/sbt/zinc/issues/292
  protected val theNodes: Coeval[Seq[Node]] = Coeval.evalOnce {
    Option(System.getProperty("waves.it.config.file")) match {
      case None => dockerNodes()
      case Some(filePath) =>
        val defaultConfig = ConfigFactory.load()
        ConfigFactory
          .parseFile(new File(filePath))
          .getConfigList("nodes")
          .asScala
          .toSeq
          .map(cfg => new ExternalNode(cfg.withFallback(defaultConfig).resolve()))
    }
  }

  override protected def nodes: Seq[Node] = theNodes()

  protected override def beforeAll(): Unit = {
    theNodes.run
    super.beforeAll()
  }
} 
Example 95
Source File: Node.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import java.net.{InetSocketAddress, URL}
import java.util.concurrent.TimeUnit

import com.typesafe.config.Config
import com.wavesplatform.account.{KeyPair, PublicKey}
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.util.GlobalTimer
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.diffs.FeeValidation
import com.wavesplatform.utils.LoggerFacade
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.asynchttpclient.Dsl.{config => clientConfig, _}
import org.asynchttpclient._
import org.slf4j.LoggerFactory

import scala.concurrent.duration.FiniteDuration

abstract class Node(val config: Config) extends AutoCloseable {
  lazy val log: LoggerFacade =
    LoggerFacade(LoggerFactory.getLogger(s"${getClass.getCanonicalName}.${this.name}"))

  val settings: WavesSettings = WavesSettings.fromRootConfig(config)
  val client: AsyncHttpClient = asyncHttpClient(
    clientConfig()
      .setKeepAlive(false)
      .setNettyTimer(GlobalTimer.instance))

  lazy val grpcChannel: ManagedChannel = ManagedChannelBuilder.forAddress(networkAddress.getHostString, nodeExternalPort(6870))
    .usePlaintext()
    .keepAliveWithoutCalls(true)
    .keepAliveTime(30, TimeUnit.SECONDS)
    .build()

  val keyPair: KeyPair  = KeyPair.fromSeed(config.getString("account-seed")).explicitGet()
  val publicKey: PublicKey = PublicKey.fromBase58String(config.getString("public-key")).explicitGet()
  val address: String      = config.getString("address")

  def nodeExternalPort(internalPort: Int): Int
  def nodeApiEndpoint: URL
  def apiKey: String

  
  def networkAddress: InetSocketAddress

  override def close(): Unit = client.close()
}

object Node {
  implicit class NodeExt(val n: Node) extends AnyVal {
    def name: String               = n.settings.networkSettings.nodeName
    def publicKeyStr: String       = n.publicKey.toString
    def fee(txTypeId: Byte): Long  = FeeValidation.FeeConstants(txTypeId) * FeeValidation.FeeUnit
    def blockDelay: FiniteDuration = n.settings.blockchainSettings.genesisSettings.averageBlockDelay
  }
} 
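
A hypothetical sketch of the per-node config fragment read by the Node constructor above (only the three keys it accesses directly are shown; in the suites these values come from nodes.conf or the docker-generated configs, and the full WavesSettings tree is also required by WavesSettings.fromRootConfig):

import com.typesafe.config.ConfigFactory

// Placeholder values; real configs carry actual base58 strings.
val nodeConfig = ConfigFactory.parseString(
  """account-seed = "<base58 seed>"
    |public-key   = "<base58 public key>"
    |address      = "<base58 address>"
    |""".stripMargin
)

nodeConfig.getString("address") // the same accessor Node uses above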
Example 96
Source File: BaseTransactionSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.transactions

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it._
import monix.eval.Coeval
import org.scalatest.{BeforeAndAfterAll, FunSuite, Suite}

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext

trait BaseTransactionSuiteLike extends WaitForHeight2 with IntegrationSuiteWithThreeAddresses with BeforeAndAfterAll with NodesFromDocker {
  this: Suite =>

  protected implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()

  override def miner: Node = nodes.head

  // protected because https://github.com/sbt/zinc/issues/292
  protected val theNodes: Coeval[Seq[Node]] = Coeval.evalOnce {
    Option(System.getProperty("waves.it.config.file")) match {
      case None => dockerNodes()
      case Some(filePath) =>
        val defaultConfig = ConfigFactory.load()
        ConfigFactory
          .parseFile(new File(filePath))
          .getConfigList("nodes")
          .asScala
          .toSeq
          .map(cfg => new ExternalNode(cfg.withFallback(defaultConfig).resolve()))
    }
  }

  override protected def nodes: Seq[Node] = theNodes()

  protected override def beforeAll(): Unit = {
    theNodes.run
    super.beforeAll()
  }
}

abstract class BaseTransactionSuite extends FunSuite with BaseTransactionSuiteLike 
Example 97
Source File: FairPoSTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{CancelAfterFailure, FunSuite}
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import scala.concurrent.duration._

class FairPoSTestSuite extends FunSuite with CancelAfterFailure with NodesFromDocker {
  import FairPoSTestSuite._

  override protected def nodeConfigs: Seq[Config] = Configs

  test("blockchain grows with FairPoS activated") {
    nodes.waitForSameBlockHeadersAt(height = 10, conditionAwaitTime = 11.minutes)

    val txId = nodes.head.transfer(nodes.head.address, nodes.last.address, transferAmount, minFee).id
    nodes.last.waitForTransaction(txId)

    val heightAfterTransfer = nodes.head.height

    nodes.waitForSameBlockHeadersAt(heightAfterTransfer + 10, conditionAwaitTime = 11.minutes)
  }
}

object FairPoSTestSuite {
  import com.wavesplatform.it.NodeConfigs._
  private val microblockActivationHeight = 0
  private val fairPoSActivationHeight    = 10
  private val vrfActivationHeight        = 14

  private val config =
    ConfigFactory.parseString(s"""
    |waves {
    |   blockchain.custom {
    |      functionality {
    |        pre-activated-features {1 = $microblockActivationHeight, 8 = $fairPoSActivationHeight, 17 = $vrfActivationHeight}
    |        generation-balance-depth-from-50-to-1000-after-height = 1000
    |      }
    |   }
    |   miner.quorum = 1
    |}""".stripMargin)

  val Configs: Seq[Config] = Default.map(config.withFallback(_)).take(3)
} 
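
A quick illustration (not from the original source) of the withFallback layering used here and throughout these suites: keys in the config receiving the call win, so the suite-specific overrides take precedence over each entry of Default:

import com.typesafe.config.ConfigFactory

val overrides = ConfigFactory.parseString("waves.miner.quorum = 1")
val base      = ConfigFactory.parseString("waves.miner.quorum = 2\nwaves.miner.enable = yes")

val merged = overrides.withFallback(base)
merged.getInt("waves.miner.quorum")    // 1: the override wins
merged.getString("waves.miner.enable") // "yes": filled in from the fallback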
Example 98
Source File: DebugPortfoliosSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.debug

import com.typesafe.config.Config
import com.wavesplatform.it.{Node, NodeConfigs}
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.util._
import com.wavesplatform.it.sync._
import org.scalatest.FunSuite

class DebugPortfoliosSuite extends FunSuite with NodesFromDocker {
  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(entitiesNumber = 1)
      .buildNonConflicting()

  private def sender: Node = nodes.head

  private val firstAddress  = sender.createAddress()
  private val secondAddress = sender.createAddress()

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    sender.transfer(sender.address, firstAddress, 20.waves, minFee, waitForTx = true)
    sender.transfer(sender.address, secondAddress, 20.waves, minFee, waitForTx = true)
  }

  test("getting a balance considering pessimistic transactions from UTX pool - changed after UTX") {
    val portfolioBefore = sender.debugPortfoliosFor(firstAddress, considerUnspent = true)
    val utxSizeBefore   = sender.utxSize

    sender.transfer(firstAddress, secondAddress, 5.waves, 5.waves)
    sender.transfer(secondAddress, firstAddress, 7.waves, 5.waves)

    sender.waitForUtxIncreased(utxSizeBefore)

    val portfolioAfter = sender.debugPortfoliosFor(firstAddress, considerUnspent = true)

    val expectedBalance = portfolioBefore.balance - 10.waves // withdraw + fee
    assert(portfolioAfter.balance == expectedBalance)

  }

  test("getting a balance without pessimistic transactions from UTX pool - not changed after UTX") {
    nodes.waitForHeightArise()

    val portfolioBefore = sender.debugPortfoliosFor(firstAddress, considerUnspent = false)
    val utxSizeBefore   = sender.utxSize

    sender.transfer(firstAddress, secondAddress, 5.waves, fee = 5.waves)
    sender.waitForUtxIncreased(utxSizeBefore)

    val portfolioAfter = sender.debugPortfoliosFor(firstAddress, considerUnspent = false)
    assert(portfolioAfter.balance == portfolioBefore.balance)
  }
} 
Example 99
Source File: BlacklistTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.Config
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, ReportingTestName}
import org.scalatest._
import scala.concurrent.duration._

class BlacklistTestSuite extends FreeSpec with Matchers with CancelAfterFailure with ReportingTestName with NodesFromDocker {

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(1))
      .withDefault(2)
      .buildNonConflicting()

  private def primaryNode = dockerNodes().last

  private def otherNodes = dockerNodes().init

  "primary node should blacklist other nodes" in {
    otherNodes.foreach(n => primaryNode.blacklist(n.containerNetworkAddress))

    val expectedBlacklistedPeers = nodes.size - 1

    primaryNode.waitFor[Seq[BlacklistedPeer]](s"blacklistedPeers.size == $expectedBlacklistedPeers")(
      _ => primaryNode.blacklistedPeers,
      _.lengthCompare(expectedBlacklistedPeers) == 0,
      1.second
    )
  }

  "sleep while nodes are blocked" in {
    primaryNode.waitFor[Seq[BlacklistedPeer]](s"blacklistedPeers is empty")(_.blacklistedPeers, _.isEmpty, 5.second)
  }

  "and sync again" in {
    val baseHeight = nodes.map(_.height).max
    nodes.waitForSameBlockHeadersAt(baseHeight + 5)
  }

} 
Example 100
Source File: UTXAllowance.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.smartcontract

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.account.KeyPair
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.util._
import com.wavesplatform.it.{ReportingTestName, WaitForHeight2}
import com.wavesplatform.lang.v1.estimator.v2.ScriptEstimatorV2
import com.wavesplatform.transaction.smart.script.ScriptCompiler
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers}

class UTXAllowance extends FreeSpec with Matchers with WaitForHeight2 with CancelAfterFailure with ReportingTestName with NodesFromDocker {
  import UTXAllowance._

  override protected def nodeConfigs: Seq[Config] = Configs

  private def nodeA = nodes.head
  private def nodeB = nodes.last

  "create two nodes with scripted accounts and check UTX" in {
    val accounts = List(nodeA, nodeB).map(i => {

      val nodeAddress = i.createAddress()
      val acc         = KeyPair.fromSeed(i.seed(nodeAddress)).explicitGet()

      i.transfer(i.address, nodeAddress, 10.waves, 0.005.waves, None, waitForTx = true)

      val scriptText = s"""true""".stripMargin
      val script               = ScriptCompiler(scriptText, isAssetScript = false, ScriptEstimatorV2).explicitGet()._1.bytes().base64
      i.setScript(acc.toAddress.toString, Some(script), setScriptFee, waitForTx = true)

      acc
    })

    assertBadRequestAndMessage(
      nodeA
        .transfer(
          accounts.head.toAddress.toString,
          recipient = accounts.head.toAddress.toString,
          assetId = None,
          amount = 1.waves,
          fee = minFee + 0.004.waves,
          version = 2
        ),
      "transactions from scripted accounts are denied from UTX pool"
    )

    val txBId =
      nodeB
        .transfer(
          accounts(1).toAddress.toString,
          recipient = accounts(1).toAddress.toString,
          assetId = None,
          amount = 1.01.waves,
          fee = minFee + 0.004.waves,
          version = 2
        )
        .id

    nodes.waitForHeightArise()
    nodeA.findTransactionInfo(txBId) shouldBe None
  }

}

object UTXAllowance {
  import com.wavesplatform.it.NodeConfigs._
  private val FirstNode = ConfigFactory.parseString(s"""
                                                         |waves {
                                                         |  utx.allow-transactions-from-smart-accounts = false
                                                         |  miner {
                                                         |      quorum = 0
                                                         |      enable = yes
                                                         |  }
                                                         |}""".stripMargin)

  private val SecondNode = ConfigFactory.parseString(s"""
                                                          |waves {
                                                          |  utx.allow-transactions-from-smart-accounts = true
                                                          |  miner {
                                                          |      enable = no
                                                          |  }
                                                          |}""".stripMargin)

  val Configs: Seq[Config] = Seq(
    FirstNode.withFallback(Default.head),
    SecondNode.withFallback(Default(1))
  )

} 
Example 101
Source File: RideIssueTransactionSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.smartcontract

import java.nio.charset.StandardCharsets

import com.typesafe.config.Config
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.NodeConfigs.Default
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.lang.v1.estimator.v3.ScriptEstimatorV3
import com.wavesplatform.transaction.smart.script.ScriptCompiler
import org.scalatest.{Assertion, CancelAfterFailure}

class RideIssueTransactionSuite extends BaseTransactionSuite with CancelAfterFailure {
  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs
      .Builder(Default, 1, Seq.empty)
      .overrideBase(_.quorum(0))
      .buildNonConflicting()

  val assetName        = "Asset name"
  val assetDescription = "Asset description"
  val assetQuantity    = 2000

  val issueCheckV4 =
    compile(
      s"""
         | {-# STDLIB_VERSION 4 #-}
         | {-# CONTENT_TYPE EXPRESSION #-}
         | {-# SCRIPT_TYPE ACCOUNT #-}
         |
         | match tx {
         |   case i: IssueTransaction =>
         |     i.name        == "$assetName"         &&
         |     i.description == "$assetDescription"
         |
         |   case _ =>
         |     throw("unexpected")
         | }
         |
          """.stripMargin
    )

  val issueCheckV3 =
    compile(
      s"""
         | {-# STDLIB_VERSION 3 #-}
         | {-# CONTENT_TYPE EXPRESSION #-}
         | {-# SCRIPT_TYPE ACCOUNT #-}
         |
         | match tx {
         |   case i: IssueTransaction =>
         |     i.name        == base64'${ByteStr(assetName.getBytes(StandardCharsets.UTF_8)).base64}'        &&
         |     i.description == base64'${ByteStr(assetDescription.getBytes(StandardCharsets.UTF_8)).base64}'
         |
         |   case _ =>
         |     throw("unexpected")
         | }
         |
          """.stripMargin
    )

  test("check issuing asset name and description using V3 and V4 script") {
    assertSuccessIssue(firstAddress, issueCheckV3)
    assertSuccessIssue(secondAddress, issueCheckV4)
  }

  def compile(script: String): String =
    ScriptCompiler.compile(script, ScriptEstimatorV3).explicitGet()._1.bytes().base64

  def assertSuccessIssue(address: String, script: String): Assertion = {
    val setScriptId = sender.setScript(address, Some(script), setScriptFee, waitForTx = true).id

    val scriptInfo = sender.addressScriptInfo(address)
    scriptInfo.script.isEmpty shouldBe false
    scriptInfo.scriptText.isEmpty shouldBe false
    scriptInfo.script.get.startsWith("base64:") shouldBe true

    sender.transactionInfo[TransactionInfo](setScriptId).script.get.startsWith("base64:") shouldBe true

    val assetId = sender.issue(address, assetName, assetDescription, assetQuantity, fee = issueFee + smartFee, waitForTx = true).id

    sender.assertAssetBalance(address, assetId, assetQuantity)

    val asset = sender.assetsDetails(assetId)
    asset.name shouldBe assetName
    asset.description shouldBe assetDescription
  }
} 
Example 102
Source File: InvokeCalcIssueSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.smartcontract

import com.typesafe.config.Config
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.NodeConfigs.Default
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.it.util._
import com.wavesplatform.lang.v1.estimator.v3.ScriptEstimatorV3
import com.wavesplatform.state.BinaryDataEntry
import com.wavesplatform.transaction.smart.script.ScriptCompiler
import org.scalatest.{CancelAfterFailure, Matchers, OptionValues}

class InvokeCalcIssueSuite extends BaseTransactionSuite with Matchers with CancelAfterFailure with OptionValues {
  import InvokeCalcIssueSuite._

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs
      .Builder(Default, 1, Seq.empty)
      .overrideBase(_.quorum(0))
      .overrideBase(_.preactivatedFeatures((BlockchainFeatures.BlockV5.id, 0)))
      .buildNonConflicting()

  private val smartAcc  = firstAddress
  private val callerAcc = secondAddress


  test("calculateAssetId should return right unique id for each invoke") {

    sender.setScript(
      smartAcc,
      Some(ScriptCompiler.compile(dAppV4, ScriptEstimatorV3).explicitGet()._1.bytes().base64),
      fee = setScriptFee + smartFee,
      waitForTx = true
    )
    sender
      .invokeScript(
        callerAcc,
        smartAcc,
        Some("i"),
        args = List.empty,
        fee = invokeFee + issueFee, // dAppV4 contains 1 Issue action
        waitForTx = true
      )
    val assetId = sender.getDataByKey(smartAcc, "id").as[BinaryDataEntry].value.toString

    sender
      .invokeScript(
        callerAcc,
        smartAcc,
        Some("i"),
        args = List.empty,
        fee = invokeFee + issueFee, // dAppV4 contains 1 Issue action
        waitForTx = true
      )
    val secondAssetId = sender.getDataByKey(smartAcc, "id").as[BinaryDataEntry].value.toString

    sender.assetBalance(smartAcc, assetId).balance shouldBe 100
    sender.assetBalance(smartAcc, secondAssetId).balance shouldBe 100

    val assetDetails = sender.assetsDetails(assetId)
    assetDetails.decimals shouldBe decimals
    assetDetails.name shouldBe assetName
    assetDetails.reissuable shouldBe reissuable
    assetDetails.description shouldBe assetDescr
    assetDetails.minSponsoredAssetFee shouldBe None

  }
}

object InvokeCalcIssueSuite {

  val assetName = "InvokeAsset"
  val assetDescr = "Invoke asset descr"
  val amount = 100
  val decimals = 0
  val reissuable = true

  private val dAppV4: String =
    s"""{-# STDLIB_VERSION 4 #-}
      |{-# CONTENT_TYPE DAPP #-}
      |
      |@Callable(i)
      |func i() = {
      |let issue = Issue("$assetName", "$assetDescr", $amount, $decimals, $reissuable, unit, 0)
      |let id = calculateAssetId(issue)
      |[issue,
      | BinaryEntry("id", id)]
      |}
      |
      |""".stripMargin
} 
Example 103
Source File: DataTransactionBodyBytesByteVectorSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.smartcontract

import com.typesafe.config.Config
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync.{setScriptFee, _}
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.lang.v1.compiler.Terms
import com.wavesplatform.lang.v1.estimator.v2.ScriptEstimatorV2
import com.wavesplatform.state.{BinaryDataEntry, DataEntry}
import com.wavesplatform.transaction.TxVersion
import com.wavesplatform.transaction.smart.script.ScriptCompiler

class DataTransactionBodyBytesByteVectorSuite extends BaseTransactionSuite {
  private def compile(scriptText: String) =
    ScriptCompiler.compile(scriptText, ScriptEstimatorV2).explicitGet()._1.bytes().base64

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .buildNonConflicting()

  private val maxDataTxV1bodyBytesSize = 153530 // actually lower than Terms.DataTxMaxBytes

  private val scriptV3 =
    compile(
      s"""
         |{-# STDLIB_VERSION 3 #-}
         |{-# CONTENT_TYPE EXPRESSION #-}
         |
         | match tx {
         |    case dtx: DataTransaction =>
         |      dtx.bodyBytes.size() == $maxDataTxV1bodyBytesSize &&
         |      dtx.data.size() == 5
         |
         |   case _ =>
         |      throw("unexpected")
         | }
         |
       """.stripMargin
    )

  private val scriptV4 =
    compile(
      s"""
         |{-# STDLIB_VERSION 4 #-}
         |{-# CONTENT_TYPE EXPRESSION #-}
         |
         | match tx {
         |   case dtx: DataTransaction =>
         |     dtx.bodyBytes.size() == ${Terms.DataTxMaxProtoBytes}         &&
         |     dtx.data.size() == 6                                         &&
         |     sigVerify(dtx.bodyBytes, dtx.proofs[0], dtx.senderPublicKey)
         |
         |  case _ =>
         |     throw("unexpected")
         | }
         |
       """.stripMargin
    )

  private val maxDataEntriesV1 =
    List(
      BinaryDataEntry("a", ByteStr.fill(22380)(1)),
      BinaryDataEntry("b", ByteStr.fill(DataEntry.MaxValueSize)(1)),
      BinaryDataEntry("c", ByteStr.fill(DataEntry.MaxValueSize)(1)),
      BinaryDataEntry("d", ByteStr.fill(DataEntry.MaxValueSize)(1)),
      BinaryDataEntry("e", ByteStr.fill(DataEntry.MaxValueSize)(1))
    )

  private val maxDataEntriesV2 =
    maxDataEntriesV1 :+ BinaryDataEntry("f", ByteStr.fill(12378)(1))

  test("filled data transaction body bytes") {
    checkByteVectorLimit(firstAddress, maxDataEntriesV1, scriptV3, TxVersion.V1)
    checkByteVectorLimit(secondAddress, maxDataEntriesV2, scriptV4, TxVersion.V2)
  }

  private def checkByteVectorLimit(address: String, data: List[BinaryDataEntry], script: String, version: TxVersion) = {
    val setScriptId = sender.setScript(address, Some(script), setScriptFee, waitForTx = true).id
    sender.transactionInfo[TransactionInfo](setScriptId).script.get.startsWith("base64:") shouldBe true

    val scriptInfo = sender.addressScriptInfo(address)
    scriptInfo.script.isEmpty shouldBe false
    scriptInfo.scriptText.isEmpty shouldBe false
    scriptInfo.script.get.startsWith("base64:") shouldBe true

    sender.putData(address, data, version = version, fee = calcDataFee(data, version) + smartFee, waitForTx = true).id

    val increasedData = data.head.copy(value = data.head.value ++ ByteStr.fromBytes(1)) :: data.tail
    assertBadRequestAndMessage(
      sender.putData(address, increasedData, version = version, fee = calcDataFee(data, version) + smartFee),
      "Too big sequences requested"
    )
  }
} 
Example 104
Source File: SetScriptBodyBytesByteVectorSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.smartcontract

import com.typesafe.config.Config
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.lang.v1.estimator.v2.ScriptEstimatorV2
import com.wavesplatform.transaction.smart.script.ScriptCompiler

class SetScriptBodyBytesByteVectorSuite extends BaseTransactionSuite {
  private def compile(scriptText: String) =
    ScriptCompiler.compile(scriptText, ScriptEstimatorV2).explicitGet()._1.bytes().base64

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .buildNonConflicting()

  private val expectedBodyBytesSize = 32815

  private val verifierV3 =
    compile(
      s"""
         |{-# STDLIB_VERSION 3 #-}
         |{-# CONTENT_TYPE EXPRESSION #-}
         |
         | match tx {
         |    case sstx: SetScriptTransaction =>
         |      sstx.bodyBytes.size() == $expectedBodyBytesSize
         |
         |   case _ =>
         |      throw("unexpected")
         | }
         |
       """.stripMargin
    )

  private val verifierV4 =
    compile(
      s"""
         |{-# STDLIB_VERSION 4 #-}
         |{-# CONTENT_TYPE EXPRESSION #-}
         |
         | match tx {
         |   case sstx: SetScriptTransaction =>
         |     sstx.bodyBytes.size() == $expectedBodyBytesSize                 &&
         |     sigVerify(sstx.bodyBytes, sstx.proofs[0], sstx.senderPublicKey)
         |
         |  case _ =>
         |     throw("unexpected")
         | }
         |
       """.stripMargin
    )

  private def dApp(letCount: Int) = {
    val body = (1 to letCount).map(i => s"let a$i = 1 ").mkString
    compile(
      s"""
         | {-# STDLIB_VERSION 4 #-}
         | {-# CONTENT_TYPE DAPP #-}
         | {-# SCRIPT_TYPE ACCOUNT #-}
         |
         | $body
       """.stripMargin
    )
  }

  test("big SetScript body bytes") {
    checkByteVectorLimit(firstAddress, verifierV3)
    checkByteVectorLimit(secondAddress, verifierV4)

    (the[RuntimeException] thrownBy dApp(1782)).getMessage shouldBe "Script is too large: 32780 bytes > 32768 bytes"
  }

  private def checkByteVectorLimit(address: String, verifier: String) = {
    val setScriptId = sender.setScript(address, Some(verifier), setScriptFee, waitForTx = true).id
    sender.transactionInfo[TransactionInfo](setScriptId).script.get.startsWith("base64:") shouldBe true

    val scriptInfo = sender.addressScriptInfo(address)
    scriptInfo.script.isEmpty shouldBe false
    scriptInfo.scriptText.isEmpty shouldBe false
    scriptInfo.script.get.startsWith("base64:") shouldBe true

    sender.setScript(address, Some(dApp(1781)), setScriptFee + smartFee, waitForTx = true)
  }
} 
Example 105
Source File: ForgeReturnedToUTXSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.transactions.NodesFromDocker
import org.scalatest.{CancelAfterFailure, FunSuite, Matchers}

class ForgeReturnedToUTXSuite extends FunSuite with CancelAfterFailure with NodesFromDocker with Matchers {

  import ForgeReturnedToUTXSuite._
  override protected def nodeConfigs: Seq[Config] = Configs

  private def miner = nodes.head
  private def last  = nodes.last

  test("dependent trasactions can be added to UTX if first mined and returned to UTX") {

    //asset tx should be mined in first microblock as as new keyblock mined, others microblocks should not be applied due to big microblockInterval
    val assetId                      = last.issue(last.address, "asset", "descr", issueAmount, 0, reissuable = false, issueFee, waitForTx = true).id
    val issueAssetInitialHeight: Int = last.transactionInfo[TransactionInfo](assetId).height

    // all microblocks should be returned to UTX, so assetId goes back to UTX and no further microblocks will be mined at this height;
    // the transfer tx will therefore stay in UTX until a new keyblock is mined
    val transferTx = last.transfer(last.address, miner.address, 1L, minFee, Some(assetId), None, waitForTx = true).id

    val issueAssetHeight = last.transactionInfo[TransactionInfo](assetId).height
    val transferTxHeight = last.transactionInfo[TransactionInfo](transferTx).height

    // the transfer tx and the issue asset tx should be placed in the same microblock
    transferTxHeight shouldBe issueAssetHeight
    transferTxHeight shouldNot be(issueAssetInitialHeight)

  }

}

object ForgeReturnedToUTXSuite {
  import com.wavesplatform.it.NodeConfigs._

  // the microblock interval should be greater than the average block interval
  val microblockInterval  = 60
  private val minerConfig = ConfigFactory.parseString(s"""
                                                         |waves {
                                                         |  miner {
                                                         |    micro-block-interval = ${microblockInterval}s
                                                         |    min-micro-block-age = 60s
                                                         |  }
                                                         |  blockchain.custom.genesis {
                                                         |     average-block-delay = 20s
                                                         |  }
                                                         |  miner.quorum = 1
                                                         |}""".stripMargin)

  val Configs: Seq[Config] = Seq(
    minerConfig.withFallback(Default.head),
    minerConfig.withFallback(Default(1))
  )

} 
Example 106
Source File: NetworkUniqueConnectionsTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.network

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.{DockerBased, Node, NodeConfigs, Nodes}
import com.wavesplatform.utils.ScorexLogging
import org.scalatest.{FreeSpec, Matchers}

import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class NetworkUniqueConnectionsTestSuite extends FreeSpec with Matchers with DockerBased with ScorexLogging with Nodes {
  import NetworkUniqueConnectionsTestSuite._

  "nodes should up and connect with each other" in {
    val firstNode = docker.startNode(FirstNodeConfig, autoConnect = false)
    nodes = Seq(firstNode)

    val status = firstNode.status()
    log.trace(s"#### $status")
    assert(status.blockchainHeight >= status.stateHeight)

    val secondNode = {
      // Helps to do an incoming connection: second -> first (1)
      val peersConfig = ConfigFactory.parseString(
        s"""waves.network.known-peers = [
             |  "${firstNode.containerNetworkAddress.getHostName}:${firstNode.containerNetworkAddress.getPort}"
             |]""".stripMargin
      )

      docker.startNode(peersConfig.withFallback(SecondNodeConfig), autoConnect = false)
    }
    nodes = Seq(firstNode, secondNode) // Thread dump workaround
    firstNode.waitForPeers(1)

    // Outgoing connection: first -> second (2)
    firstNode.connect(secondNode.containerNetworkAddress)

    withClue("Should fail with TimeoutException, because the connectionAttempt should fail") {
      Try(firstNode.waitForPeers(2, 30.seconds)) match {
        case Failure(ApiCallException(_: TimeoutException)) => // Pass
        case Failure(exception) => fail(exception)
        case Success(v) => fail(s"Expected TimeoutException, got $v")
      }
    }
  }

  protected var nodes: Seq[Node]         = Nil
  protected def nodeConfigs: Seq[Config] = NetworkUniqueConnectionsTestSuite.configs
}

object NetworkUniqueConnectionsTestSuite {
  private val configs          = NodeConfigs.newBuilder.withDefault(0).withSpecial(2, _.nonMiner).build()
  val FirstNodeConfig: Config  = configs.head
  val SecondNodeConfig: Config = configs.last
} 
Example 107
Source File: SimpleTransactionsSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.network

import java.nio.charset.StandardCharsets

import com.typesafe.config.Config
import com.wavesplatform.account.Address
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.AsyncNetworkApi._
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.network.{RawBytes, TransactionSpec}
import com.wavesplatform.transaction.Asset.Waves
import com.wavesplatform.transaction.transfer._
import org.scalatest._

import scala.concurrent.duration._

class SimpleTransactionsSuite extends BaseTransactionSuite with Matchers {
  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(entitiesNumber = 1)
      .buildNonConflicting()

  private def node = nodes.head

  test("valid tx send by network to node should be in blockchain") {
    val tx = TransferTransaction
      .selfSigned(
        1.toByte,
        node.keyPair,
        Address.fromString(node.address).explicitGet(),
        Waves,
        1L,
        Waves,
        minFee,
        ByteStr.empty,
        System.currentTimeMillis()
      )
      .explicitGet()

    node.sendByNetwork(RawBytes.fromTransaction(tx))
    node.waitForTransaction(tx.id().toString)

  }

  test("invalid tx send by network to node should be not in UTX or blockchain") {
    val tx = TransferTransaction
      .selfSigned(
        1.toByte,
        node.keyPair,
        Address.fromString(node.address).explicitGet(),
        Waves,
        1L,
        Waves,
        minFee,
        ByteStr.empty,
        System.currentTimeMillis() + (1 days).toMillis
      )
      .explicitGet()

    node.sendByNetwork(RawBytes.fromTransaction(tx))
    val maxHeight = nodes.map(_.height).max
    nodes.waitForHeight(maxHeight + 1)
    node.ensureTxDoesntExist(tx.id().toString)
  }

  test("should blacklist senders of non-parsable transactions") {
    val blacklistBefore = node.blacklistedPeers
    node.sendByNetwork(RawBytes(TransactionSpec.messageCode, "foobar".getBytes(StandardCharsets.UTF_8)))
    node.waitForBlackList(blacklistBefore.size)
  }
} 
Example 108
Source File: DetectBrokenConnectionsTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.network

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.NodeConfigs.Default
import com.wavesplatform.it.ReportingTestName
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import org.scalatest.{FreeSpec, Matchers}

import scala.concurrent.duration._

class DetectBrokenConnectionsTestSuite extends FreeSpec with Matchers with ReportingTestName with NodesFromDocker {

  override protected def nodeConfigs: Seq[Config] = {
    val highPriorityConfig = ConfigFactory.parseString("waves.network.break-idle-connections-timeout = 20s")
    Default.take(2).map(highPriorityConfig.withFallback)
  }

  "disconnect nodes from the network and wait a timeout for detecting of broken connections" in {
    dockerNodes().foreach(docker.disconnectFromNetwork)
    Thread.sleep(30.seconds.toMillis)

    dockerNodes().foreach { node =>
      docker.connectToNetwork(Seq(node))
      node.connectedPeers shouldBe empty
      docker.disconnectFromNetwork(node)
    }

    // To prevent errors in the log
    docker.connectToNetwork(dockerNodes())
  }

} 
Example 109
Source File: NotActivateFeatureTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.activation

import com.typesafe.config.Config
import com.wavesplatform.features.BlockchainFeatureStatus
import com.wavesplatform.features.api.{FeatureActivationStatus, NodeFeatureStatus}
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.BlockHeader
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, ReportingTestName}
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers}

class NotActivateFeatureTestSuite
    extends FreeSpec
    with Matchers
    with CancelAfterFailure
    with ActivationStatusRequest
    with ReportingTestName
    with NodesFromDocker {

  private val votingInterval             = 14
  private val blocksForActivation        = 14
  private val votingFeatureNum: Short    = 1
  private val nonVotingFeatureNum: Short = 2

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(
        _.raw(
          s"""waves {
         |  blockchain {
         |    custom {
         |      functionality {
         |        pre-activated-features = {}
         |        feature-check-blocks-period = $votingInterval
         |        blocks-for-feature-activation = $blocksForActivation
         |      }
         |    }
         |  }
         |  features.supported=[$nonVotingFeatureNum]
         |  miner.quorum = 1
         |}""".stripMargin
        ))
      .withDefault(2)
      .buildNonConflicting()

  private var activationStatusInfoBefore = Seq.empty[FeatureActivationStatus]
  private var activationStatusInfoAfter  = Seq.empty[FeatureActivationStatus]

  "get activation status info" in {
    nodes.waitForHeight(votingInterval - 1)
    activationStatusInfoBefore = nodes.map(_.featureActivationStatus(votingFeatureNum))
    nodes.waitForHeight(votingInterval + 1)
    activationStatusInfoAfter = nodes.map(_.featureActivationStatus(votingFeatureNum))
  }

  "supported blocks is not increased when nobody votes for feature" in {
    val generatedBlocks: Seq[BlockHeader] = nodes.head.blockHeadersSeq(1, votingInterval - 1)
    val featuresMapInGeneratedBlocks       = generatedBlocks.flatMap(b => b.features.getOrElse(Seq.empty)).groupBy(x => x)
    val votesForFeature1                   = featuresMapInGeneratedBlocks.getOrElse(votingFeatureNum, Seq.empty).length

    votesForFeature1 shouldBe 0
    activationStatusInfoBefore.foreach(assertVotingStatus(_, votesForFeature1, BlockchainFeatureStatus.Undefined, NodeFeatureStatus.Implemented))
  }

  "feature is still in VOTING status on the next voting interval" in {
    activationStatusInfoAfter.foreach(assertVotingStatus(_, 0, BlockchainFeatureStatus.Undefined, NodeFeatureStatus.Implemented))
  }

} 
Example 110
Source File: FeatureActivationTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.activation

import com.typesafe.config.Config
import com.wavesplatform.features.api.NodeFeatureStatus
import com.wavesplatform.features.{BlockchainFeatureStatus, BlockchainFeatures}
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, ReportingTestName}
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers}
import com.wavesplatform.it.api.SyncHttpApi._

class FeatureActivationTestSuite
    extends FreeSpec
    with Matchers
    with CancelAfterFailure
    with NodesFromDocker
    with ActivationStatusRequest
    with ReportingTestName {

  private val votingInterval      = 12
  private val blocksForActivation = 12 // should be even
  private val featureNum: Short   = BlockchainFeatures.SmallerMinimalGeneratingBalance.id
  private val featureDescr        = BlockchainFeatures.SmallerMinimalGeneratingBalance.description

  override protected def nodeConfigs: Seq[Config] = {
    NodeConfigs.newBuilder
      .overrideBase(_.raw(s"""waves {
                               |  blockchain.custom.functionality {
                               |    pre-activated-features = {}
                               |    feature-check-blocks-period = $votingInterval
                               |    blocks-for-feature-activation = $blocksForActivation
                               |  }
                               |  features.supported = [$featureNum]
                               |  miner.quorum = 1
                               |}""".stripMargin))
      .withDefault(2)
      .buildNonConflicting()
  }

  "supported blocks increased when voting starts" in {
    nodes.waitForHeight(votingInterval * 2 / 3)
    val status = nodes.map(_.featureActivationStatus(featureNum))
    status.foreach { s =>
      s.description shouldBe featureDescr
      assertVotingStatus(s, s.supportingBlocks.get, BlockchainFeatureStatus.Undefined, NodeFeatureStatus.Voted)
    }
  }

  "supported blocks counter resets on the next voting interval" in {
    nodes.waitForHeight(votingInterval * 2 - blocksForActivation / 2)
    val info = nodes.map(_.featureActivationStatus(featureNum))
    info.foreach(i => i.blockchainStatus shouldBe BlockchainFeatureStatus.Undefined)
  }

  "blockchain status is APPROVED in second voting interval" in {
    val checkHeight = votingInterval * 2
    nodes.waitForHeight(checkHeight)
    val statusInfo = nodes.map(_.featureActivationStatus(featureNum))
    statusInfo.foreach { si =>
      si.description shouldBe featureDescr
      // Activation will be on a next voting interval
      assertApprovedStatus(si, checkHeight + votingInterval, NodeFeatureStatus.Voted)
    }
  }

  "blockchain status is ACTIVATED in third voting interval" in {
    val checkHeight = votingInterval * 3
    nodes.waitForHeight(checkHeight)
    val statusInfo = nodes.map(_.featureActivationStatus(featureNum))
    statusInfo.foreach { si =>
      si.description shouldBe featureDescr
      assertActivatedStatus(si, checkHeight, NodeFeatureStatus.Implemented)
    }
  }
} 
Example 111
Source File: AssetsApiGrpcSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.grpc

import com.typesafe.config.Config
import com.wavesplatform.it.sync._
import com.wavesplatform.it.sync.activation.ActivationStatusRequest
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{GrpcIntegrationSuiteWithThreeAddress, NodeConfigs, ReportingTestName}
import org.scalatest.{FreeSpec, Matchers}

class AssetsApiGrpcSuite
    extends FreeSpec
    with Matchers
    with NodesFromDocker
    with ActivationStatusRequest
    with ReportingTestName
    with GrpcIntegrationSuiteWithThreeAddress {

  "nftList returns all NFT" in {
    import com.wavesplatform.it.api.SyncGrpcApi._

    val txs = Map(
      "non_nft_asset" -> sender.broadcastIssue(firstAcc, "non_nft_asset", 100, 8, reissuable = true, issueFee + smartFee),
      "nft_asset_1"   -> sender.broadcastIssue(firstAcc, "nft_asset_1", 1, 0, reissuable = false, issueFee + smartFee),
      "nft_asset_2"   -> sender.broadcastIssue(firstAcc, "nft_asset_2", 1, 0, reissuable = false, issueFee + smartFee)
    )

    txs.values.foreach(tx => sender.waitForTransaction(tx.id))

    val allNft = Map(
      "nft_asset_1" -> txs("nft_asset_1").id,
      "nft_asset_2" -> txs("nft_asset_2").id
    )

    val nftList = sender.nftList(firstAddress, 10)
    nftList should have size 2
    nftList.map(_.assetInfo.get.name) should contain theSameElementsAs allNft.keySet
  }

  override def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .buildNonConflicting()
} 
Example 112
Source File: GrpcBaseTransactionSuiteLike.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.grpc

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{ExternalNode, GrpcIntegrationSuiteWithThreeAddress, GrpcWaitForHeight, Node, NodeConfigs}
import monix.eval.Coeval
import org.scalatest.{BeforeAndAfterAll, FunSuite, Suite}

import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

trait GrpcBaseTransactionSuiteLike
  extends GrpcWaitForHeight
  with GrpcIntegrationSuiteWithThreeAddress
  with BeforeAndAfterAll
  with NodesFromDocker { this: Suite =>

  protected implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()

  // protected because https://github.com/sbt/zinc/issues/292
  protected val theNodes: Coeval[Seq[Node]] = Coeval.evalOnce {
    Option(System.getProperty("waves.it.config.file")) match {
      case None => dockerNodes()
      case Some(filePath) =>
        val defaultConfig = ConfigFactory.load()
        ConfigFactory
          .parseFile(new File(filePath))
          .getConfigList("nodes")
          .asScala
          .toSeq
          .map(cfg => new ExternalNode(cfg.withFallback(defaultConfig).resolve()))
    }
  }

  protected override def beforeAll(): Unit = {
    theNodes.run
    super.beforeAll()
  }
}

abstract class GrpcBaseTransactionSuite extends FunSuite with GrpcBaseTransactionSuiteLike 
Example 113
Source File: BlockV5GrpcSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.grpc

import com.google.protobuf.ByteString
import com.typesafe.config.Config
import com.wavesplatform.api.grpc.BlockRangeRequest
import com.wavesplatform.block.Block
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.it.api.SyncGrpcApi._
import com.wavesplatform.it.sync.activation.ActivationStatusRequest
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{GrpcIntegrationSuiteWithThreeAddress, NodeConfigs, ReportingTestName}
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers, OptionValues}

import scala.concurrent.duration._

class BlockV5GrpcSuite
    extends FreeSpec
    with Matchers
    with CancelAfterFailure
    with NodesFromDocker
    with ActivationStatusRequest
    with ReportingTestName
    with OptionValues
    with GrpcIntegrationSuiteWithThreeAddress {

  override def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(1, _.nonMiner)
      .buildNonConflicting()

  "block v5 appears and blockchain grows" - {
    "when feature activation happened" in {
      sender.waitForHeight(sender.height + 1, 2.minutes)
      val currentHeight = sender.height

      val blockV5     = sender.blockAt(currentHeight)
      val blockV5ById = sender.blockById(ByteString.copyFrom(blockV5.id().arr))

      blockV5.header.version shouldBe Block.ProtoBlockVersion
      blockV5.id().arr.length shouldBe crypto.DigestLength
      blockV5.signature.arr.length shouldBe crypto.SignatureLength
      blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
      assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
      blockV5ById.header.version shouldBe Block.ProtoBlockVersion
      blockV5ById.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
      assert(blockV5ById.transactionsRootValid(), "transactionsRoot is not valid")

      sender.waitForHeight(currentHeight + 1, 2.minutes)

      val blockAfterVRFUsing     = sender.blockAt(currentHeight + 1)
      val blockAfterVRFUsingById = sender.blockById(ByteString.copyFrom(blockAfterVRFUsing.id().arr))

      blockAfterVRFUsing.header.version shouldBe Block.ProtoBlockVersion
      blockAfterVRFUsing.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
      ByteStr(sender.blockHeaderAt(currentHeight + 1).reference.toByteArray) shouldBe blockV5.id()
      blockAfterVRFUsingById.header.version shouldBe Block.ProtoBlockVersion
      blockAfterVRFUsingById.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
      assert(blockAfterVRFUsingById.transactionsRootValid(), "transactionsRoot is not valid")

      val blockSeqOfBlocksV5 = sender.blockSeq(currentHeight, currentHeight + 2)

      for (blockV5 <- blockSeqOfBlocksV5) {
        blockV5.header.version shouldBe Block.ProtoBlockVersion
        blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
        assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
      }

      val blockSeqOfBlocksV5ByAddress = sender.blockSeqByAddress(miner.address, currentHeight, currentHeight + 2)

      for (blockV5 <- blockSeqOfBlocksV5ByAddress) {
        blockV5.header.generator shouldBe miner.keyPair.publicKey
        blockV5.header.version shouldBe Block.ProtoBlockVersion
        blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
        assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
      }

      val blockSeqOfBlocksV5ByPKGrpc = NodeExtGrpc(sender).blockSeq(
        currentHeight,
        currentHeight + 2,
        BlockRangeRequest.Filter.GeneratorPublicKey(ByteString.copyFrom(miner.keyPair.publicKey.arr))
      )

      for (blockV5 <- blockSeqOfBlocksV5ByPKGrpc) {
        blockV5.header.generator shouldBe miner.keyPair.publicKey
        blockV5.header.version shouldBe Block.ProtoBlockVersion
        blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
        assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
      }
    }
  }
} 
Example 114
Source File: MicroblocksSponsoredFeeTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.Config
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.state.Sponsorship
import com.wavesplatform.state.diffs.FeeValidation
import com.wavesplatform.utils.ScorexLogging
import org.scalatest.{CancelAfterFailure, FreeSpec, Matchers}

class MicroblocksSponsoredFeeTestSuite extends FreeSpec with Matchers with CancelAfterFailure with NodesFromDocker with ScorexLogging {

  private def notMiner = nodes.head

  val sponsor           = nodes(1)
  val Token             = 100L
  val sponsorAssetTotal = 100000 * Token
  val minSponsorFee     = Token
  val SmallFee          = Token + Token / 2

  private def secondAddress = nodes(2).address

  private def txRequestsGen(n: Int, sponsorAssetId: String): Unit = {
    1 to n map (_ => {
      sponsor.transfer(sponsor.address, secondAddress, Token, fee = SmallFee, None, Some(sponsorAssetId))
    })
  }

  "fee distribution with sponsorship" - {
    val sponsorAssetId = sponsor
      .issue(sponsor.address, "SponsoredAsset", "Created by Sponsorship Suite", sponsorAssetTotal, decimals = 2, reissuable = false, fee = issueFee)
      .id
    nodes.waitForHeightAriseAndTxPresent(sponsorAssetId)

    val transferTxToSecondAddress = sponsor.transfer(sponsor.address, secondAddress, sponsorAssetTotal / 2, minFee, Some(sponsorAssetId), None).id
    nodes.waitForHeightAriseAndTxPresent(transferTxToSecondAddress)

    val sponsorId = sponsor.sponsorAsset(sponsor.address, sponsorAssetId, baseFee = Token, fee = sponsorReducedFee).id
    nodes.waitForHeightAriseAndTxPresent(sponsorId)

    "check fee distribution" in {
      val height = nodes.waitForHeightArise()

      txRequestsGen(50, sponsorAssetId)
      nodes.waitForHeight(height + 2)

      val blockHeadersSeq = notMiner.blockHeadersSeq(height - 1, height + 2)

      val filteredBlocks = blockHeadersSeq
        .zip(blockHeadersSeq.drop(1))
        .withFilter(t => t._1.transactionCount != t._2.transactionCount)
        .map(_._1) :+ blockHeadersSeq.last

      val filteredBlocksFee        = filteredBlocks.map(b => b.transactionCount * FeeValidation.FeeUnit * SmallFee / minSponsorFee)
      val minerBalances: Seq[Long] = filteredBlocks.map(b => notMiner.debugStateAt(b.height)(b.generator))

      minerBalances.zip(filteredBlocksFee).sliding(2).foreach {
        case Seq((minerBalance1, blockFee1), (minerBalance2, blockFee2)) =>
          minerBalance2 should be(minerBalance1 + blockFee1 * 6 / 10 + blockFee2 * 4 / 10)
      }

      val block   = notMiner.blockAt(height)
      val realFee = block.transactions.map(tx => Sponsorship.toWaves(tx.fee, Token)).sum
      blockHeadersSeq(1).totalFee shouldBe realFee
    }
  }

  override def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .overrideBase(_.raw("waves.blockchain.custom.functionality.blocks-for-feature-activation=1"))
      .overrideBase(_.raw("waves.blockchain.custom.functionality.feature-check-blocks-period=1"))
      .overrideBase(_.preactivatedFeatures((14, 1000000)))
      .withDefault(1)
      .withSpecial(2, _.nonMiner)
      .buildNonConflicting()

} 
Example 115
Source File: LeaseStatusTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.transactions

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.api.http.TransactionsApiRoute.LeaseStatus
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import org.scalatest.CancelAfterFailure
import play.api.libs.json.Json

class LeaseStatusTestSuite extends BaseTransactionSuite with CancelAfterFailure {
  import LeaseStatusTestSuite._

  override protected def nodeConfigs: Seq[Config] = Configs

  test("verification of leasing status") {
    val createdLeaseTxId = sender.lease(firstAddress, secondAddress, leasingAmount, leasingFee = minFee).id
    nodes.waitForHeightAriseAndTxPresent(createdLeaseTxId)
    val status = getStatus(createdLeaseTxId)
    status shouldBe LeaseStatus.Active

    val cancelLeaseTxId = sender.cancelLease(firstAddress, createdLeaseTxId, fee = minFee).id
    miner.waitForTransaction(cancelLeaseTxId)
    nodes.waitForHeightArise()
    val status1 = getStatus(createdLeaseTxId)
    status1 shouldBe LeaseStatus.Canceled
    val sizeActiveLeases = sender.activeLeases(firstAddress).size
    sizeActiveLeases shouldBe 0
  }

  private def getStatus(txId: String): String = {
    val r = sender.get(s"/transactions/info/$txId")
    (Json.parse(r.getResponseBody) \ "status").as[String]
  }
}

object LeaseStatusTestSuite {
  private val blockGenerationOffset = "10000ms"
  import com.wavesplatform.it.NodeConfigs.Default

  private val minerConfig = ConfigFactory.parseString(s"""waves {
       |   miner{
       |      enable = yes
       |      minimal-block-generation-offset = $blockGenerationOffset
       |      quorum = 0
       |      micro-block-interval = 3s
       |      max-transactions-in-key-block = 0
       |   }
       |}
     """.stripMargin)

  private val notMinerConfig = ConfigFactory.parseString(s"""waves {
       |   miner.enable = no
       |   miner.minimal-block-generation-offset = $blockGenerationOffset
       |}
     """.stripMargin)

  val Configs: Seq[Config] = Seq(
    minerConfig.withFallback(Default.head),
    notMinerConfig.withFallback(Default(1))
  )

} 
Example 116
Source File: ReissueTransactionSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.transactions

import com.typesafe.config.Config
import com.wavesplatform.account.AddressScheme
import com.wavesplatform.api.http.ApiError.StateCheckFailed
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.it.util._
import com.wavesplatform.transaction.assets.ReissueTransaction

class ReissueTransactionSuite extends BaseTransactionSuite {

  test("asset reissue changes issuer's asset balance; issuer's waves balance is decreased by fee") {
    for (v <- reissueTxSupportedVersions) {
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)

      val issuedAssetId = sender.issue(firstAddress, "name2", "description2", someAssetAmount, decimals = 2, reissuable = true, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)

      val reissueTx = sender.reissue(firstAddress, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee, version = v)
      nodes.waitForHeightAriseAndTxPresent(reissueTx.id)
      if (v > 2) {
        reissueTx.chainId shouldBe Some(AddressScheme.current.chainId)
        sender.transactionInfo[TransactionInfo](reissueTx.id).chainId shouldBe Some(AddressScheme.current.chainId)
      }
      miner.assertBalances(firstAddress, balance - issueFee - reissueReducedFee, effectiveBalance - issueFee - reissueReducedFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, 2 * someAssetAmount)
    }

    miner.transactionsByAddress(firstAddress, limit = 100)
      .count(_._type == ReissueTransaction.typeId) shouldBe reissueTxSupportedVersions.length
  }

  test("can't reissue not reissuable asset") {
    for (v <- reissueTxSupportedVersions) {
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)

      val issuedAssetId = sender.issue(firstAddress, "name2", "description2", someAssetAmount, decimals = 2, reissuable = false, issueFee).id
      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)

      assertBadRequestAndMessage(
        sender.reissue(firstAddress, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee, version = v),
        "Asset is not reissuable"
      )
      nodes.waitForHeightArise()

      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
    }
  }

  test("not able to reissue if cannot pay fee - less than required") {
    for (v <- reissueTxSupportedVersions) {
      val issuedAssetId = sender.issue(firstAddress, "name3", "description3", someAssetAmount, decimals = 2, reissuable = true, issueFee).id

      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)

      assertApiError(sender.reissue(firstAddress, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueReducedFee - 1, version = v)) { error =>
        error.id shouldBe StateCheckFailed.Id
        error.message should include(s"Fee for ReissueTransaction (${reissueReducedFee - 1} in WAVES) does not exceed minimal value of $reissueReducedFee WAVES.")
      }
    }
  }

  test("not able to reissue if cannot pay fee - insufficient funds") {
    for (v <- reissueTxSupportedVersions) {
      val (balance, effectiveBalance) = miner.accountBalances(firstAddress)
      val reissueFee = effectiveBalance + 1.waves

      val issuedAssetId = sender.issue(firstAddress, "name4", "description4", someAssetAmount, decimals = 2, reissuable = true, issueFee).id

      nodes.waitForHeightAriseAndTxPresent(issuedAssetId)

      assertBadRequestAndMessage(
        sender.reissue(firstAddress, issuedAssetId, someAssetAmount, reissuable = true, fee = reissueFee, version = v),
        "Accounts balance errors"
      )
      nodes.waitForHeightArise()

      miner.assertAssetBalance(firstAddress, issuedAssetId, someAssetAmount)
      miner.assertBalances(firstAddress, balance - issueFee, effectiveBalance - issueFee)
    }
  }

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()
} 
Example 117
Source File: SetAssetScriptTxFeatureSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.transactions

import com.typesafe.config.Config
import com.wavesplatform.features.{BlockchainFeatureStatus, BlockchainFeatures}
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync.{issueFee, scriptBase64, setAssetScriptFee, someAssetAmount}
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.common.utils._
import com.wavesplatform.lang.v1.estimator.v2.ScriptEstimatorV2
import com.wavesplatform.transaction.smart.script.ScriptCompiler

class SetAssetScriptTxFeatureSuite extends BaseTransactionSuite {

  private val featureActivationHeight = 8

  private var assetId = ""

  override def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .overrideBase(_.raw(s"""waves {
                             |  blockchain.custom.functionality {
                             |    pre-activated-features = {
                             |      ${BlockchainFeatures.SmartAssets.id} = $featureActivationHeight
                             |    }
                             |    
                             |  }
                             |}""".stripMargin))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()

  override def beforeAll(): Unit = {
    super.beforeAll()

    assetId = sender
      .issue(
        firstAddress,
        "SetAssetScript",
        "Test coin for SetAssetScript tests",
        someAssetAmount,
        0,
        reissuable = false,
        issueFee,
        2,
        Some(scriptBase64)
      )
      .id

    nodes.waitForHeightAriseAndTxPresent(assetId)
  }

  test("cannot transact without activated feature") {
    assertBadRequestAndResponse(
      sender.setAssetScript(assetId, firstAddress, setAssetScriptFee, Some(scriptBase64)).id,
      s"${BlockchainFeatures.SmartAssets.description} feature has not been activated yet"
    )
  }

  test("can transact after feature activation") {
    nodes.waitForHeight(featureActivationHeight)

    sender.featureActivationStatus(BlockchainFeatures.SmartAssets.id).blockchainStatus shouldBe BlockchainFeatureStatus.Activated

    val script = ScriptCompiler
      .compile(
        s"""
           |match tx {
           |  case _: SetAssetScriptTransaction => true
           |  case _ => false
           |}""".stripMargin,
        ScriptEstimatorV2
      )
      .explicitGet()
      ._1
      .bytes
      .value
      .base64

    val txId = sender
      .setAssetScript(
        assetId,
        firstAddress,
        setAssetScriptFee,
        Some(script)
      )
      .id

    nodes.waitForHeightAriseAndTxPresent(txId)
  }
} 
Example 118
Source File: RebroadcastTransactionSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.transactions

import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory.parseString
import com.wavesplatform.account.Address
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.Node
import com.wavesplatform.it.NodeConfigs._
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.{BaseTransactionSuite, NodesFromDocker}
import com.wavesplatform.transaction.Asset.Waves
import com.wavesplatform.transaction.transfer.TransferTransaction

class RebroadcastTransactionSuite extends BaseTransactionSuite with NodesFromDocker {

  import RebroadcastTransactionSuite._

  override protected def nodeConfigs: Seq[Config] =
    Seq(configWithRebroadcastAllowed.withFallback(NotMiner), configWithRebroadcastAllowed.withFallback(Miners.head))

  private def nodeA: Node = nodes.head
  private def nodeB: Node = nodes.last

  test("should rebroadcast a transaction if that's allowed in config") {
    val tx = TransferTransaction
      .selfSigned(2.toByte, nodeA.keyPair, Address.fromString(nodeB.address).explicitGet(), Waves, transferAmount, Waves, minFee, ByteStr.empty, System.currentTimeMillis())
      .explicitGet()
      .json()

    val dockerNodeBId = docker.stopContainer(dockerNodes.apply().last)
    val txId          = nodeA.signedBroadcast(tx).id
    docker.startContainer(dockerNodeBId)
    nodeA.waitForPeers(1)

    nodeB.ensureTxDoesntExist(txId)
    nodeA.signedBroadcast(tx)
    nodeB.waitForUtxIncreased(0)
    nodeB.utxSize shouldBe 1
  }

  test("should not rebroadcast a transaction if that's not allowed in config") {
    dockerNodes().foreach(docker.restartNode(_, configWithRebroadcastNotAllowed))
    val tx = TransferTransaction
      .selfSigned(2.toByte, nodeA.keyPair, Address.fromString(nodeB.address).explicitGet(), Waves, transferAmount, Waves, minFee, ByteStr.empty, System.currentTimeMillis())
      .explicitGet()
      .json()

    val dockerNodeBId = docker.stopContainer(dockerNodes.apply().last)
    val txId          = nodeA.signedBroadcast(tx).id
    docker.startContainer(dockerNodeBId)
    nodeA.waitForPeers(1)

    nodeB.ensureTxDoesntExist(txId)
    nodeA.signedBroadcast(tx)
    nodes.waitForHeightArise()
    nodeB.utxSize shouldBe 0
    nodeB.ensureTxDoesntExist(txId)
  }
}

object RebroadcastTransactionSuite {

  private val configWithRebroadcastAllowed =
    parseString("waves.synchronization.utx-synchronizer.allow-tx-rebroadcasting = true")

  private val configWithRebroadcastNotAllowed =
    parseString("waves.synchronization.utx-synchronizer.allow-tx-rebroadcasting = false")

} 
Example 119
Source File: RideCreateMerkleRootTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.Config
import com.wavesplatform.account._
import com.wavesplatform.common.merkle.Merkle
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.{Base58, EitherExt2}
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.Transaction
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{Node, NodeConfigs, ReportingTestName, TransferSending}
import com.wavesplatform.lang.v1.compiler.Terms._
import com.wavesplatform.lang.v1.estimator.v3.ScriptEstimatorV3
import com.wavesplatform.state._
import com.wavesplatform.transaction.Asset._
import com.wavesplatform.transaction.{Proofs, TxVersion}
import com.wavesplatform.transaction.smart.script.ScriptCompiler
import com.wavesplatform.transaction.transfer.TransferTransaction
import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.{CancelAfterFailure, FunSuite, Matchers}


class RideCreateMerkleRootTestSuite
    extends FunSuite
    with CancelAfterFailure
    with TransferSending
    with NodesFromDocker
    with ReportingTestName
    with Matchers
    with TableDrivenPropertyChecks {
  override def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .overrideBase(_.preactivatedFeatures((14, 1000000), BlockchainFeatures.NG.id.toInt -> 0, BlockchainFeatures.FairPoS.id.toInt -> 0, BlockchainFeatures.Ride4DApps.id.toInt -> 0, BlockchainFeatures.BlockV5.id.toInt -> 0))
      .withDefault(1)
      .buildNonConflicting()

  private def sender: Node         = nodes.last

  test("Ride createMerkleRoot") {
    val script = """
        |{-# STDLIB_VERSION 4 #-}
        |{-# CONTENT_TYPE DAPP #-}
        |
        | @Callable(inv)
        |func foo(proof: List[ByteVector], id: ByteVector, index: Int) = [
        | BinaryEntry("root", createMerkleRoot(proof, id, index))
        |]
        """.stripMargin
    val cscript = ScriptCompiler.compile(script, ScriptEstimatorV3).explicitGet()._1.bytes().base64
    val node = nodes.head
    nodes.waitForHeightArise()
    val tx1 = node.broadcastTransfer(node.keyPair, sender.address, setScriptFee, minFee, None, None, version = TxVersion.V3, waitForTx = false)
    val txId1 = tx1.id
    val tx2 = node.broadcastTransfer(node.keyPair, node.address, 1, minFee, None, None, version = TxVersion.V3, waitForTx = false)
    val txId2 = tx2.id
    val tx3 = node.broadcastTransfer(node.keyPair, node.address, 1, minFee, None, None, version = TxVersion.V3, waitForTx = false)
    val txId3 = tx3.id
    val tx4 = node.broadcastTransfer(node.keyPair, node.address, 1, minFee, None, None, version = TxVersion.V3, waitForTx = false)
    val txId4 = tx4.id
    val tx5 = node.broadcastTransfer(node.keyPair, node.address, 1, minFee, None, None, version = TxVersion.V3, waitForTx = false)
    val txId5 = tx5.id

    val height = node.height

    nodes.waitForHeightArise()

    def tt(tx: Transaction) = TransferTransaction
      .create(
        tx.version.get,
        PublicKey(Base58.decode(tx.senderPublicKey.get)),
        Address.fromString(tx.recipient.get).explicitGet(),
        Waves,
        tx.fee,
        ByteStr.empty, // attachment
        tx.timestamp,
        Proofs(tx.proofs.get.map(v => ByteStr(Base58.decode(v))))
      )
      .explicitGet()
    val natives = Seq(tx1, tx2, tx3, tx4, tx5).map(tt).map(t => Base58.encode(t.id().arr) -> t).toMap

    val root = Base58.decode(node.blockAt(height).transactionsRoot.get)

    val proofs = nodes.head.getMerkleProof(txId1, txId2, txId3, txId4, txId5)

    sender.setScript(sender.address, Some(cscript), setScriptFee, waitForTx = true).id

    for(p <- proofs) {
      node.invokeScript(
        node.address,
        sender.address,
        func = Some("foo"),
        args = List(ARR(p.merkleProof.map(v => CONST_BYTESTR(ByteStr(Base58.decode(v))).explicitGet()).toIndexedSeq, false).explicitGet(),
                    CONST_BYTESTR(ByteStr(Merkle.hash(natives(p.id).bytes()))).explicitGet(),
                    CONST_LONG(p.transactionIndex.toLong)),
        payment = Seq(),
        fee = 2 * smartFee + minFee,
        waitForTx = true
      )
      node.getDataByKey(sender.address, "root") shouldBe BinaryDataEntry("root", ByteStr(root))
    }
  }
} 
Example 120
Source File: MinerStateTestSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.api.State
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.util._
import org.scalatest.{CancelAfterFailure, FunSuite, Matchers}
import scala.concurrent.duration._

class MinerStateTestSuite extends FunSuite with CancelAfterFailure with NodesFromDocker with Matchers {
  import MinerStateTestSuite._

  override protected def nodeConfigs: Seq[Config] = Configs

  private val transferAmount = 1000.waves

  private def miner = nodes.head
  private def last  = nodes.last

  test("node w/o balance can forge blocks after effective balance increase") {
    val newAddress = last.createAddress()

    val (balance1, eff1)        = miner.accountBalances(miner.address)
    val minerFullBalanceDetails = miner.balanceDetails(miner.address)
    assert(balance1 == minerFullBalanceDetails.available)
    assert(eff1 == minerFullBalanceDetails.effective)

    val (balance2, eff2)     = last.accountBalances(newAddress)
    val newAccBalanceDetails = last.balanceDetails(newAddress)
    assert(balance2 == newAccBalanceDetails.available)
    assert(eff2 == newAccBalanceDetails.effective)

    val minerInfoBefore = last.debugMinerInfo()
    all(minerInfoBefore) shouldNot matchPattern { case State(`newAddress`, _, ts) if ts > 0 => }

    miner.waitForPeers(1)
    val txId = miner.transfer(miner.address, newAddress, transferAmount, minFee).id
    nodes.waitForHeightAriseAndTxPresent(txId)

    val heightAfterTransfer = miner.height

    last.assertBalances(newAddress, balance2 + transferAmount, eff2 + transferAmount)

    last.waitForHeight(heightAfterTransfer + 51, 6.minutes) // if you know how to reduce waiting time, please ping @monroid

    assert(last.balanceDetails(newAddress).generating == balance2 + transferAmount)

    val minerInfoAfter = last.debugMinerInfo()
    atMost(1, minerInfoAfter) should matchPattern { case State(`newAddress`, _, ts) if ts > 0 => }

    last.waitForPeers(1)
    val leaseBack = last.lease(newAddress, miner.address, (transferAmount - minFee), minFee).id
    nodes.waitForHeightAriseAndTxPresent(leaseBack)

    assert(last.balanceDetails(newAddress).generating == balance2)

    all(miner.debugMinerInfo()) shouldNot matchPattern { case State(`newAddress`, _, ts) if ts > 0 => }

    all(last.debugMinerInfo()) shouldNot matchPattern { case State(`newAddress`, _, ts) if ts > 0 => }

  }
}

object MinerStateTestSuite {
  import com.wavesplatform.it.NodeConfigs._
  private val minerConfig = ConfigFactory.parseString(s"""
    |waves {
    |  synchronization.synchronization-timeout = 10s
    |  blockchain.custom.functionality {
    |    pre-activated-features.1 = 0
    |    generation-balance-depth-from-50-to-1000-after-height = 100
    |  }
    |  blockchain.custom.genesis {
    |     average-block-delay = 5s
    |  }
    |  miner.quorum = 1
    |}""".stripMargin)

  val Configs: Seq[Config] = Seq(
    minerConfig.withFallback(Default.head),
    minerConfig.withFallback(Default(1))
  )

} 
Example 121
Source File: MicroblocksGenerationSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.async

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.api.AsyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, TransferSending}
import org.scalatest._

import scala.concurrent.Await.result
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class MicroblocksGenerationSuite extends FreeSpec with Matchers with TransferSending with NodesFromDocker {
  import MicroblocksGenerationSuite._

  override protected val nodeConfigs: Seq[Config] =
    Seq(ConfigOverrides.withFallback(NodeConfigs.randomMiner))

  private val nodeAddresses = nodeConfigs.map(_.getString("address")).toSet

  private def miner = nodes.head

  s"Generate transactions and wait for one block with $maxTxs txs" in result(
    for {
      uploadedTxs <- processRequests(generateTransfersToRandomAddresses(maxTxs, nodeAddresses))
      _           <- miner.waitForHeight(3)
      block       <- miner.blockAt(2)
    } yield {
      block.transactions.size shouldBe maxTxs

      val blockTxs = block.transactions.map(_.id)
      val diff     = uploadedTxs.map(_.id).toSet -- blockTxs
      diff shouldBe empty
    },
    3.minutes
  )

}

object MicroblocksGenerationSuite {
  private val txsInMicroBlock = 200
  private val maxTxs          = 2000
  private val ConfigOverrides = ConfigFactory.parseString(s"""waves {
                                                             |    miner {
                                                             |      quorum = 0
                                                             |      minimal-block-generation-offset = 1m
                                                             |      micro-block-interval = 3s
                                                             |      max-transactions-in-key-block = 0
                                                             |      max-transactions-in-micro-block = $txsInMicroBlock
                                                             |    }
                                                             |
                                                             |    blockchain.custom.functionality.pre-activated-features.2 = 0
                                                             |    features.supported = [2]
                                                             |}""".stripMargin)
} 
Example 122
Source File: BlockSizeConstraintsSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.async

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.api.AsyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, TransferSending}
import org.scalatest._

import scala.concurrent.Await.result
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Future

class BlockSizeConstraintsSuite extends FreeSpec with Matchers with TransferSending with NodesFromDocker {
  import BlockSizeConstraintsSuite._

  override protected val nodeConfigs: Seq[Config] =
    Seq(ConfigOverrides.withFallback(NodeConfigs.randomMiner))

  private val nodeAddresses = nodeConfigs.map(_.getString("address")).toSet
  private val miner         = nodes.head

  val transfers = generateTransfersToRandomAddresses(maxTxsGroup, nodeAddresses)
  s"Block is limited by size after activation" in result(
    for {
      _                 <- Future.sequence((0 to maxGroups).map(_ => processRequests(transfers, includeAttachment = true)))
      _                 <- miner.waitForHeight(3)
      _                 <- Future.sequence((0 to maxGroups).map(_ => processRequests(transfers, includeAttachment = true)))
      blockHeaderBefore <- miner.blockHeadersAt(2)
      _                 <- miner.waitForHeight(4)
      blockHeaderAfter  <- miner.blockHeadersAt(3)
    } yield {
      val maxSizeInBytesAfterActivation = (1.1d * 1024 * 1024).toInt // including headers
      val blockSizeInBytesBefore        = blockHeaderBefore.blocksize
      blockSizeInBytesBefore should be > maxSizeInBytesAfterActivation

      val blockSizeInBytesAfter = blockHeaderAfter.blocksize
      blockSizeInBytesAfter should be <= maxSizeInBytesAfterActivation
    },
    10.minutes
  )

}

object BlockSizeConstraintsSuite {
  private val maxTxsGroup     = 500 // More, than 1mb of block
  private val maxGroups       = 9
  private val txsInMicroBlock = 500
  private val ConfigOverrides = ConfigFactory.parseString(s"""akka.http.server {
                                                             |  parsing.max-content-length = 3737439
                                                             |  request-timeout = 60s
                                                             |}
                                                             |
                                                             |waves {
                                                             |  network.enable-peers-exchange = no
                                                             |
                                                             |  miner {
                                                             |    quorum = 0
                                                             |    minimal-block-generation-offset = 60000ms
                                                             |    micro-block-interval = 1s
                                                             |    max-transactions-in-key-block = 0
                                                             |    max-transactions-in-micro-block = $txsInMicroBlock
                                                             |  }
                                                             |
                                                             |  blockchain.custom {
                                                             |    functionality {
                                                             |      feature-check-blocks-period = 1
                                                             |      blocks-for-feature-activation = 1
                                                             |
                                                             |      pre-activated-features {
                                                             |        2: 0
                                                             |        3: 2
                                                             |      }
                                                             |    }
                                                             |
                                                             |    store-transactions-in-state = false
                                                             |  }
                                                             |
                                                             |  features.supported = [2, 3]
                                                             |}""".stripMargin)

} 
Example 123
Source File: SmartTransactionsConstraintsSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.async

import com.typesafe.config.Config
import com.wavesplatform.account.KeyPair
import com.wavesplatform.api.http.requests.SignedSetScriptRequest
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.api.AsyncHttpApi._
import com.wavesplatform.it.transactions.NodesFromDocker
import com.wavesplatform.it.{NodeConfigs, TransferSending}
import com.wavesplatform.lang.directives.values.V1
import com.wavesplatform.lang.script.v1.ExprScript
import com.wavesplatform.lang.v1.compiler.Terms
import com.wavesplatform.mining.MiningConstraints.MaxScriptRunsInBlock
import com.wavesplatform.transaction.TxVersion
import com.wavesplatform.transaction.smart.SetScriptTransaction
import org.scalatest._
import play.api.libs.json.{JsNumber, Json}

import scala.concurrent.Await.result
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SmartTransactionsConstraintsSuite extends FreeSpec with Matchers with TransferSending with NodesFromDocker {

  override protected val nodeConfigs: Seq[Config] = NodeConfigs.newBuilder
    .overrideBase(
      _.raw(
        s"""akka.http.server {
         |  parsing.max-content-length = 3737439
         |  request-timeout = 60s
         |}
         |
         |waves {
         |  miner.quorum = 0
         |
         |  blockchain.custom {
         |    functionality {
         |      pre-activated-features {
         |        2: 0
         |        4: 0
         |        11: 100500
         |      }
         |    }
         |  }
         |}""".stripMargin
      )
    )
    .withDefault(1)
    .build(false)

  private def miner                   = nodes.head
  private val smartPrivateKey  = KeyPair.fromSeed(NodeConfigs.Default(1).getString("account-seed")).explicitGet()
  private val simplePrivateKey = KeyPair.fromSeed(NodeConfigs.Default(2).getString("account-seed")).explicitGet()

  s"Block is limited by size after activation" in result(
    for {
      _ <- miner.signedBroadcast(Json.toJsObject(toRequest(setScriptTx(smartPrivateKey))) + ("type" -> JsNumber(13)))
      _ <- processRequests(generateTransfersFromAccount(MaxScriptRunsInBlock * 3, smartPrivateKey.toAddress.toString))
      _ <- miner.waitForHeight(5)
      _ <- processRequests(generateTransfersFromAccount(MaxScriptRunsInBlock * 3, smartPrivateKey.toAddress.toString))
      _ <- scala.concurrent.Future.sequence((0 to 9).map(_ =>
        processRequests(generateTransfersFromAccount((50 - MaxScriptRunsInBlock / 10), simplePrivateKey.toAddress.toString))))
      _                  <- miner.waitForHeight(6)
      blockWithSetScript <- miner.blockHeadersAt(2)
      restBlocks         <- miner.blockHeadersSeq(3, 4)
      newBlock           <- miner.blockHeadersAt(5)
    } yield {
      blockWithSetScript.transactionCount should (be <= (MaxScriptRunsInBlock + 1) and be >= 1)
      restBlocks.foreach { x =>
        x.transactionCount should be(MaxScriptRunsInBlock)
      }
      newBlock.transactionCount should be > MaxScriptRunsInBlock
    },
    12.minutes
  )

  private def setScriptTx(sender: KeyPair) =
    SetScriptTransaction
      .selfSigned(
        1.toByte,
        sender = sender,
        script = Some(ExprScript(V1, Terms.TRUE, checkSize = false).explicitGet()),
        fee = 1000000,
        timestamp = System.currentTimeMillis() - 5.minutes.toMillis
      )
      .explicitGet()

  private def toRequest(tx: SetScriptTransaction): SignedSetScriptRequest = SignedSetScriptRequest(
    version = Some(TxVersion.V1),
    senderPublicKey = tx.sender.toString,
    script = tx.script.map(_.bytes().base64),
    fee = tx.fee,
    timestamp = tx.timestamp,
    proofs = tx.proofs
  )

} 
Example 124
Source File: RootActorSystem.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.actor

import akka.actor.{ActorSystem, AllForOneStrategy, SupervisorStrategy, SupervisorStrategyConfigurator}
import com.typesafe.config.Config
import com.wavesplatform.utils.ScorexLogging

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object RootActorSystem extends ScorexLogging {
  @volatile private var failed = false

  final class EscalatingStrategy extends SupervisorStrategyConfigurator {
    override def create(): SupervisorStrategy = AllForOneStrategy(loggingEnabled = false) {
      case t: Throwable =>
        failed = true
        log.error("Root actor got exception, escalate", t)
        SupervisorStrategy.Escalate
    }
  }

  def start(id: String, config: Config)(init: ActorSystem => Unit): Unit = {
    val system = ActorSystem(id, config)
    try {
      init(system)
    } catch {
      case t: Throwable =>
        log.error(s"Error while initializing actor system $id", t)
        sys.exit(1)
    }

    Await.result(system.whenTerminated, Duration.Inf)
    if (failed) {
      sys.exit(1)
    } else {
      sys.exit(0)
    }
  }
} 
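A minimal usage sketch for RootActorSystem.start. The system name and the placeholder actor registered inside the init block are assumptions for illustration, not the node's actual wiring.

import akka.actor.Props
import com.typesafe.config.ConfigFactory
import com.wavesplatform.actor.RootActorSystem

object RootActorSystemUsage extends App {
  // start() blocks until the actor system terminates, then exits with code 0 or 1
  RootActorSystem.start("example-system", ConfigFactory.load()) { system =>
    system.actorOf(Props.empty, "placeholder") // register real top-level actors here
  }
}
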
Example 125
Source File: InfluxDBSpanReporter.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.metrics

import com.typesafe.config.Config
import com.wavesplatform.utils.ScorexLogging
import kamon.module.{ModuleFactory, SpanReporter}
import kamon.tag.Tag
import kamon.trace.Span
import org.influxdb.dto.Point

class InfluxDBSpanReporterFactory extends ModuleFactory {
  override def create(settings: ModuleFactory.Settings): InfluxDBSpanReporter =
    new InfluxDBSpanReporter
}

class InfluxDBSpanReporter extends SpanReporter with ScorexLogging {
  override def reportSpans(spans: Seq[Span.Finished]): Unit = spans.foreach { span =>
    def createPoint(name: String): Point.Builder =
      (span.tags.all() ++ span.metricTags.all())
        .foldLeft(Point.measurement(name))((bp, t) => bp.tag(t.key, String.valueOf(Tag.unwrapValue(t))))

    val (pointWithMarks, _) = span.marks.foldRight((createPoint(span.operationName), span.from.toEpochMilli)) {
      case (m, (bp, lastMarkTime)) =>
        val currentTime  = m.instant.toEpochMilli
        val relativeDiff = currentTime - lastMarkTime
        (
          bp.addField(m.key, relativeDiff),
          currentTime
        )
    }
    Metrics.write(pointWithMarks, span.from.toEpochMilli)

    val timeMarks = ("span.started" -> span.from) +: span.marks.reverse.map(m => (m.key, m.instant)) :+ ("span.finished" -> span.to)
    timeMarks.foreach {
      case (name, time) =>
        val point = createPoint(s"${span.operationName}.marks").tag("mark", name)
        Metrics.write(point, time.toEpochMilli)
    }
  }

  override def stop(): Unit                         = ()
  override def reconfigure(newConfig: Config): Unit = ()
} 
Example 126
Source File: SystemInformationReporter.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.utils

import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions}

object SystemInformationReporter extends ScorexLogging {
  def report(config: Config): Unit = {
    val resolved = config.resolve()
    val configForLogs = {
      val orig = Seq(
        "waves",
        "metrics"
      ).foldLeft(ConfigFactory.empty()) { case (r, path) => r.withFallback(resolved.withOnlyPath(path)) }

      Seq(
        "waves.custom.genesis",
        "waves.wallet",
        "waves.rest-api.api-key-hash",
        "metrics.influx-db"
      ).foldLeft(orig)(_.withoutPath(_))
    }

    val renderOptions = ConfigRenderOptions
      .defaults()
      .setOriginComments(false)
      .setComments(false)
      .setFormatted(false)

    val logInfo: Seq[(String, Any)] = Seq(
      "Available processors" -> Runtime.getRuntime.availableProcessors,
      "Max memory available" -> Runtime.getRuntime.maxMemory
    ) ++ Seq(
      "os.name",
      "os.version",
      "os.arch",
      "java.version",
      "java.vendor",
      "java.home",
      "java.class.path",
      "user.dir",
      "sun.net.inetaddr.ttl",
      "sun.net.inetaddr.negative.ttl",
      "networkaddress.cache.ttl",
      "networkaddress.cache.negative.ttl"
    ).map { x =>
      x -> System.getProperty(x)
    } ++ Seq(
      "Configuration" -> configForLogs.root.render(renderOptions)
    )

    log.debug(logInfo.map { case (n, v) => s"$n: $v" }.mkString("\n"))
  }
} 
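A one-line usage sketch, assuming the application's configuration is loadable from the classpath:

import com.typesafe.config.ConfigFactory
import com.wavesplatform.utils.SystemInformationReporter

// Logs JVM/system properties plus the sanitized, resolved configuration at debug level
SystemInformationReporter.report(ConfigFactory.load())
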
Example 127
Source File: WavesSettings.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.settings

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.metrics.Metrics
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._

import scala.concurrent.duration.FiniteDuration

case class WavesSettings(
    directory: String,
    ntpServer: String,
    dbSettings: DBSettings,
    extensions: Seq[String],
    extensionsShutdownTimeout: FiniteDuration,
    networkSettings: NetworkSettings,
    walletSettings: WalletSettings,
    blockchainSettings: BlockchainSettings,
    minerSettings: MinerSettings,
    restAPISettings: RestAPISettings,
    synchronizationSettings: SynchronizationSettings,
    utxSettings: UtxSettings,
    featuresSettings: FeaturesSettings,
    rewardsSettings: RewardsVotingSettings,
    metrics: Metrics.Settings,
    config: Config
)

object WavesSettings extends CustomValueReaders {
  def fromRootConfig(rootConfig: Config): WavesSettings = {
    val waves = rootConfig.getConfig("waves")

    val directory                 = waves.as[String]("directory")
    val ntpServer                 = waves.as[String]("ntp-server")
    val dbSettings                = waves.as[DBSettings]("db")
    val extensions                = waves.as[Seq[String]]("extensions")
    val extensionsShutdownTimeout = waves.as[FiniteDuration]("extensions-shutdown-timeout")
    val networkSettings           = waves.as[NetworkSettings]("network")
    val walletSettings            = waves.as[WalletSettings]("wallet")
    val blockchainSettings        = waves.as[BlockchainSettings]("blockchain")
    val minerSettings             = waves.as[MinerSettings]("miner")
    val restAPISettings           = waves.as[RestAPISettings]("rest-api")
    val synchronizationSettings   = waves.as[SynchronizationSettings]("synchronization")
    val utxSettings               = waves.as[UtxSettings]("utx")
    val featuresSettings          = waves.as[FeaturesSettings]("features")
    val rewardsSettings           = waves.as[RewardsVotingSettings]("rewards")
    val metrics                   = rootConfig.as[Metrics.Settings]("metrics") // TODO: Move to waves section

    WavesSettings(
      directory,
      ntpServer,
      dbSettings,
      extensions,
      extensionsShutdownTimeout,
      networkSettings,
      walletSettings,
      blockchainSettings,
      minerSettings,
      restAPISettings,
      synchronizationSettings,
      utxSettings,
      featuresSettings,
      rewardsSettings,
      metrics,
      rootConfig
    )
  }

  def default(): WavesSettings = fromRootConfig(ConfigFactory.load())
} 
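A minimal sketch of building the settings from an overridden root config. It assumes the node's reference.conf is on the classpath so every required waves.* path resolves; the directory override is only an example value.

import com.typesafe.config.ConfigFactory

val rootConfig = ConfigFactory
  .parseString("waves.directory = \"/tmp/waves-example\"")
  .withFallback(ConfigFactory.load())
  .resolve()

val settings = WavesSettings.fromRootConfig(rootConfig)
// settings.directory == "/tmp/waves-example"
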
Example 128
Source File: ConfigOps.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.settings.utils

import cats.data.Validated
import com.typesafe.config.Config
import com.wavesplatform.settings.utils.ConfigSettingsValidator.ErrorListOrOps
import net.ceedubs.ficus.readers.ValueReader

object ConfigOps {

  implicit class ConfigOps(config: Config) {

    val cfgValidator = ConfigSettingsValidator(config)

    def getValidatedSet[T: ValueReader](path: String): Set[T] = {
      cfgValidator.validateList[T](path).map(_.toSet) getValueOrThrowErrors
    }

    def getValidatedMap[K, V: ValueReader](path: String)(keyValidator: String => Validated[String, K]): Map[K, V] = {
      cfgValidator.validateMap(path)(keyValidator) getValueOrThrowErrors
    }

    def getValidatedByPredicate[T: ValueReader](path: String)(predicate: T => Boolean, errorMsg: String): T = {
      cfgValidator.validateByPredicate(path)(predicate, errorMsg) getValueOrThrowErrors
    }
  }
} 
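A short sketch of the enriched syntax; the setting name and values are invented for illustration.

import com.typesafe.config.ConfigFactory
import com.wavesplatform.settings.utils.ConfigOps._
import net.ceedubs.ficus.Ficus._

val cfg = ConfigFactory.parseString("blacklisted-assets = [\"a\", \"b\", \"a\"]")

// Duplicates collapse into the Set; invalid entries would throw with all accumulated errors
val blacklisted: Set[String] = cfg.getValidatedSet[String]("blacklisted-assets")
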
Example 129
Source File: ConfigSettingsValidator.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.settings.utils

import cats.data.{NonEmptyList, Validated, ValidatedNel}
import cats.implicits._
import com.typesafe.config.{Config, ConfigException}
import com.wavesplatform.transaction.assets.exchange.AssetPair
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ValueReader

import scala.jdk.CollectionConverters._
import scala.util.Try

object ConfigSettingsValidator {

  type ErrorsListOr[A] = ValidatedNel[String, A]

  def apply(config: Config): ConfigSettingsValidator = new ConfigSettingsValidator(config)

  implicit class ErrorListOrOps[A](validatedValue: ErrorsListOr[A]) {
    def getValueOrThrowErrors: A = validatedValue valueOr (errorsAcc => throw new Exception(errorsAcc.mkString_(", ")))
  }

  object AdhocValidation {
    def validateAssetPairKey(key: String): Validated[String, AssetPair] =
      Validated.fromTry(AssetPair.fromString(key)) leftMap (_ => s"Can't parse asset pair '$key'")
  }
}

class ConfigSettingsValidator(config: Config) {

  import ConfigSettingsValidator.ErrorsListOr

  private def createError[T](settingName: String, errorMsg: String, showError: Boolean = true, showValue: Boolean = true): NonEmptyList[String] = {

    lazy val value = config.getValue(settingName).unwrapped

    lazy val msg = (showValue, showError) match {
      case (true, true)  => s"$value ($errorMsg)"
      case (true, false) => s"$value"
      case (false, true) => s"$errorMsg"
      case _             => ""
    }

    NonEmptyList.one(s"Invalid setting $settingName value: $msg")
  }

  def validate[T: ValueReader](settingName: String, showError: Boolean = false): ErrorsListOr[T] = {
    Validated fromTry Try(config.as[T](settingName)) leftMap (ex => createError(settingName, ex.getMessage, showError))
  }

  def validateByPredicate[T: ValueReader](settingName: String)(predicate: T => Boolean, errorMsg: String): ErrorsListOr[T] = {
    validate[T](settingName, showError = true).ensure(createError(settingName, errorMsg))(predicate)
  }

  def validatePercent(settingName: String): ErrorsListOr[Double] = {
    validateByPredicate[Double](settingName)(p => 0 < p && p <= 100, "required 0 < percent <= 100")
  }

  def validateList[T: ValueReader](settingName: String): ErrorsListOr[List[T]] = {
    config
      .getList(settingName)
      .asScala
      .toList
      .zipWithIndex
      .traverse {
        case (cfg, index) =>
          val elemPath = s"$settingName.$index"
          Validated fromTry Try(cfg.atPath(elemPath).as[T](elemPath)) leftMap (ex => List(ex.getMessage))
      }
      .leftMap(errorsInList => createError(settingName, errorsInList.mkString(", "), showValue = false))
  }

  def validateMap[K, V: ValueReader](settingName: String)(keyValidator: String => Validated[String, K]): ErrorsListOr[Map[K, V]] = {
    config
      .getConfig(settingName)
      .root()
      .entrySet()
      .asScala
      .toList
      .traverse { entry =>
        val elemPath = s"$settingName.${entry.getKey}"
        val k        = keyValidator(entry.getKey).leftMap(List(_))
        val v        = Validated fromTry Try(entry.getValue.atPath(elemPath).as[V](elemPath)) leftMap (ex => List(ex.getMessage))
        k.product(v)
      }
      .map(_.toMap)
      .leftMap(errorsInList => createError(settingName, errorsInList.mkString(", "), showValue = false))
  }

  def validateWithDefault[T: ValueReader](settingName: String, defaultValue: T, showError: Boolean = false): ErrorsListOr[T] = {
    Validated
      .fromTry(Try(config.as[T](settingName)).recover { case _: ConfigException.Missing => defaultValue })
      .leftMap(ex => createError(settingName, ex.getMessage, showError))
  }

  def validateByPredicateWithDefault[T: ValueReader](
      settingName: String)(predicate: T => Boolean, errorMsg: String, defaultValue: T): ErrorsListOr[T] = {
    validateWithDefault[T](settingName, defaultValue, showError = true).ensure(createError(settingName, errorMsg))(predicate)
  }
} 
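An illustrative sketch of error-accumulating validation; the bound comes from validatePercent above, while the setting name and value are assumptions.

import com.typesafe.config.ConfigFactory
import com.wavesplatform.settings.utils.ConfigSettingsValidator
import net.ceedubs.ficus.Ficus._

val validator = ConfigSettingsValidator(ConfigFactory.parseString("order-fee.percent = 150"))

// Returns a ValidatedNel: Valid(value) or Invalid with every accumulated error message
val percent = validator.validatePercent("order-fee.percent")
percent.isValid // false, since 150 is outside (0, 100]
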
Example 130
Source File: AckCordGatewaySettings.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.util

import akka.actor.typed.ActorSystem
import com.typesafe.config.Config


class AckCordGatewaySettings(config: Config) {
  import config._

  val LogReceivedWs: Boolean = getBoolean("ackcord.logging.payloads.log-received-ws")
  val LogSentWs: Boolean     = getBoolean("ackcord.logging.payloads.log-sent-ws")

  val LogJsonTraces: Boolean    = getBoolean("ackcord.logging.traces.log-json-traces")
  val OnlyUniqueTraces: Boolean = getBoolean("ackcord.logging.traces.only-unique-traces")
  val NumTraces: Int            = getInt("ackcord.logging.traces.num-traces")
}
object AckCordGatewaySettings {

  def apply()(implicit system: ActorSystem[Nothing]): AckCordGatewaySettings =
    new AckCordGatewaySettings(system.settings.config)
} 
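A sketch of constructing the settings from an explicit Config rather than the actor system's; all values are placeholders.

import com.typesafe.config.ConfigFactory

val cfg = ConfigFactory.parseString(
  """ackcord.logging {
    |  payloads.log-received-ws = false
    |  payloads.log-sent-ws = false
    |  traces.log-json-traces = true
    |  traces.only-unique-traces = true
    |  traces.num-traces = 5
    |}""".stripMargin)

val settings = new AckCordGatewaySettings(cfg)
// settings.NumTraces == 5
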
Example 131
Source File: MySQLUtil.scala    From SqlShift   with MIT License 5 votes vote down vote up
package com.goibibo.sqlshift

import java.net.URL
import java.sql.{Connection, DriverManager}
import java.util.Properties

import com.typesafe.config.Config
import org.slf4j.{Logger, LoggerFactory}

import scala.io.Source


object MySQLUtil {
    private val logger: Logger = LoggerFactory.getLogger(this.getClass)

    private def getMySQLConnection(config: Config): Connection = {
        val mysql = config.getConfig("mysql")
        val connectionProps = new Properties()
        connectionProps.put("user", mysql.getString("username"))
        connectionProps.put("password", mysql.getString("password"))
        val jdbcUrl = s"jdbc:mysql://${mysql.getString("hostname")}:${mysql.getInt("portno")}/${mysql.getString("db")}?createDatabaseIfNotExist=true&useSSL=false"
        Class.forName("com.mysql.jdbc.Driver")
        DriverManager.getConnection(jdbcUrl, connectionProps)
    }

    def createTableAndInsertRecords(config: Config, tableName: String, psvFile: URL): Unit = {
        logger.info("Inserting records in table: {}", tableName)
        val records = Source.fromFile(psvFile.toURI).getLines().toList.drop(1) // removing header

        val conn = getMySQLConnection(config)
        val statement = conn.createStatement()
        try {
            val tableCreateQuery = config.getString("table.tableCreateQuery").replace("${tableName}", tableName)
            logger.info("Running query: {}", tableCreateQuery)
            statement.executeUpdate(tableCreateQuery)
            val insertIntoQuery = config.getString("table.insertIntoQuery").replace("${tableName}", tableName)
            logger.info("Running query: {}", insertIntoQuery)
            records.foreach { record: String =>
                val columns = record.split("\\|")
                val query = insertIntoQuery.format(columns: _*)
                statement.executeUpdate(query)
            }
        } finally {
            statement.close()
            conn.close()
        }
    }
} 
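A hypothetical config fragment containing the keys MySQLUtil reads (mysql.* and table.*); every value, including the SQL templates, is a placeholder rather than the project's actual test configuration.

import com.typesafe.config.ConfigFactory

val cfg = ConfigFactory.parseString(
  """mysql {
    |  username = "root"
    |  password = "secret"
    |  hostname = "localhost"
    |  portno   = 3306
    |  db       = "sqlshift"
    |}
    |table {
    |  tableCreateQuery = "CREATE TABLE ${tableName} (id INT)"
    |  insertIntoQuery  = "INSERT INTO ${tableName} VALUES (%s)"
    |}""".stripMargin)
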
Example 132
Source File: SparkNRedshiftUtil.scala    From SqlShift   with MIT License 5 votes vote down vote up
package com.goibibo.sqlshift

import java.sql.{Connection, DriverManager}
import java.util.Properties

import com.databricks.spark.redshift.RedshiftReaderM
import com.typesafe.config.Config
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.{Logger, LoggerFactory}


trait SparkNRedshiftUtil extends BeforeAndAfterAll {
    self: Suite =>
    private val logger: Logger = LoggerFactory.getLogger(this.getClass)
    @transient private var _sc: SparkContext = _
    @transient private var _sqlContext: SQLContext = _

    def sc: SparkContext = _sc
    def sqlContext: SQLContext = _sqlContext

    private def getRedshiftConnection(config: Config): Connection = {
        val mysql = config.getConfig("redshift")
        val connectionProps = new Properties()
        connectionProps.put("user", mysql.getString("username"))
        connectionProps.put("password", mysql.getString("password"))
        val jdbcUrl = s"jdbc:redshift://${mysql.getString("hostname")}:${mysql.getInt("portno")}/${mysql.getString("database")}?useSSL=false"
        Class.forName("com.amazon.redshift.jdbc4.Driver")
        DriverManager.getConnection(jdbcUrl, connectionProps)
    }

    val getSparkContext: (SparkContext, SQLContext) = {
        val sparkConf: SparkConf = new SparkConf().setAppName("Full Dump Testing").setMaster("local")
        val sc: SparkContext = new SparkContext(sparkConf)
        val sqlContext: SQLContext = new SQLContext(sc)

        System.setProperty("com.amazonaws.services.s3.enableV4", "true")
        sc.hadoopConfiguration.set("fs.s3a.endpoint", "s3.ap-south-1.amazonaws.com")
        sc.hadoopConfiguration.set("fs.s3a.fast.upload", "true")
        (sc, sqlContext)
    }

    def readTableFromRedshift(config: Config, tableName: String): DataFrame = {
        val redshift: Config = config.getConfig("redshift")
        val options = Map("dbtable" -> tableName,
            "user" -> redshift.getString("username"),
            "password" -> redshift.getString("password"),
            "url" -> s"jdbc:redshift://${redshift.getString("hostname")}:${redshift.getInt("portno")}/${redshift.getString("database")}",
            "tempdir" -> config.getString("s3.location"),
            "aws_iam_role" -> config.getString("redshift.iamRole")
        )
        RedshiftReaderM.getDataFrameForConfig(options, sc, sqlContext)
    }

    def dropTableRedshift(config: Config, tables: String*): Unit = {
        logger.info("Droping table: {}", tables)
        val conn = getRedshiftConnection(config)
        val statement = conn.createStatement()
        try {
            val dropTableQuery = s"""DROP TABLE ${tables.mkString(",")}"""
            logger.info("Running query: {}", dropTableQuery)
            statement.executeUpdate(dropTableQuery)
        } finally {
            statement.close()
            conn.close()
        }
    }

    override protected def beforeAll(): Unit = {
        super.beforeAll()
        val (sc, sqlContext) = getSparkContext
        _sc = sc
        _sqlContext = sqlContext
    }

    override protected def afterAll(): Unit = {
        super.afterAll()
        _sc.stop()
    }
} 
Example 133
Source File: JmeConfig.scala    From shapenet-viewer   with MIT License 5 votes vote down vote up
package edu.stanford.graphics.shapenet.jme3

import com.typesafe.config.{ConfigFactory, Config}
import edu.stanford.graphics.shapenet.util.ConfigHelper
import edu.stanford.graphics.shapenet.jme3.loaders.LoadFormat
import scala.collection.JavaConversions._


class JmeConfig(val modelCacheSize: Option[Int] = None,
                val defaultLoadFormat: Option[LoadFormat.Value] = None) {
}

object JmeConfig {
  // Stupid type-config - have to define defaults for everything....
  val defaults = ConfigFactory.parseMap(
    Map[String,Object](
    )
  )
  def apply(): JmeConfig = JmeConfig(ConfigFactory.empty())
  def apply(inputConfig: Config, name: String = "jme"): JmeConfig = {
    val config = if (inputConfig == null) defaults else inputConfig.withFallback(defaults)
    val configHelper = new ConfigHelper(config)
    new JmeConfig(
      modelCacheSize = configHelper.getIntOption(name + ".modelCacheSize"),
      defaultLoadFormat = configHelper.getStringOption(name + ".defaultLoadFormat").map( s => LoadFormat.withName(s))
    )
  }
} 
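A minimal sketch; the cache size is an arbitrary example value and defaultLoadFormat is left to fall back to its default.

import com.typesafe.config.ConfigFactory

val jme = JmeConfig(ConfigFactory.parseString("jme.modelCacheSize = 100"))
// jme.modelCacheSize == Some(100)
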
Example 134
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.aggregator

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    modules.add(new AppModule)
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
          bind(classOf[Registry]).toInstance(new NoopRegistry)
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 135
Source File: AtlasAggregatorService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.aggregator

import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Clock
import com.netflix.spectator.api.Id
import com.netflix.spectator.api.Registry
import com.netflix.spectator.atlas.AtlasRegistry
import com.typesafe.config.Config
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
class AtlasAggregatorService @Inject()(config: Config, clock: Clock, registry: Registry)
    extends AbstractService
    with Aggregator {

  private val n = math.max(1, Runtime.getRuntime.availableProcessors() / 2)
  private val aggrCfg = new AggrConfig(config, registry)
  private val registries = (0 until n)
    .map(_ => new AtlasRegistry(clock, aggrCfg))
    .toArray

  override def startImpl(): Unit = {
    registries.foreach(_.start())
  }

  override def stopImpl(): Unit = {
    registries.foreach(_.stop())
  }

  def lookup(id: Id): AtlasRegistry = {
    // Max is needed because for Integer.MIN_VALUE the abs value will be negative
    val i = math.max(math.abs(id.hashCode()), 0) % n
    registries(i)
  }

  override def add(id: Id, value: Double): Unit = {
    lookup(id).counter(id).add(value)
  }

  override def max(id: Id, value: Double): Unit = {
    lookup(id).maxGauge(id).set(value)
  }
} 
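A tiny sketch of the clamping that the comment in lookup refers to: math.abs(Int.MinValue) overflows and stays negative, so the value is clamped with max(..., 0) before the modulo.

val n = 4
math.abs(Int.MinValue)                  // -2147483648 (overflow)
math.max(math.abs(Int.MinValue), 0) % n // 0, a safe registries index
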
Example 136
Source File: AppModuleSuite.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.aggregator

import javax.inject.Singleton
import com.google.inject.AbstractModule
import com.google.inject.Guice
import com.google.inject.Provider
import com.netflix.spectator.api.Clock
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.netflix.spectator.atlas.AtlasConfig
import com.netflix.spectator.atlas.AtlasRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.funsuite.AnyFunSuite

class AppModuleSuite extends AnyFunSuite {

  import AppModuleSuite._

  private val config = ConfigFactory.load()

  test("aggr service") {
    val injector = Guice.createInjector(
      new AppModule,
      new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(config)
          bind(classOf[Registry]).toProvider(classOf[RegistryProvider])
        }
      }
    )

    val aggr = injector.getInstance(classOf[AtlasAggregatorService])
    assert(aggr != null)
  }

  test("aggr config should use prefix") {
    val config = ConfigFactory.parseString("""
        |netflix.atlas.aggr.registry.atlas.uri = "test"
      """.stripMargin)
    val aggr = new AggrConfig(config, new NoopRegistry)
    assert(aggr.uri() === "test")
  }

  test("aggr config should use default for missing props") {
    val config = ConfigFactory.parseString("""
        |netflix.atlas.aggr.registry.atlas.uri = "test"
      """.stripMargin)
    val aggr = new AggrConfig(config, new NoopRegistry)
    assert(aggr.batchSize() === 10000)
  }
}

object AppModuleSuite {

  @Singleton
  class RegistryProvider extends Provider[Registry] {
    override def get(): Registry = {
      val cfg = new AtlasConfig {
        override def get(k: String): String = null
      }
      new AtlasRegistry(Clock.SYSTEM, cfg)
    }
  }
} 
Example 137
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.druid

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple versions of registry
      // and config bindings. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Registry]).toInstance(new NoopRegistry)
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }
} 
Example 138
Source File: ExplainApi.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.druid

import akka.actor.ActorRefFactory
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.RouteResult
import akka.pattern.ask
import akka.util.Timeout
import com.netflix.atlas.akka.CustomDirectives._
import com.netflix.atlas.akka.WebApi
import com.netflix.atlas.druid.ExplainApi.ExplainRequest
import com.netflix.atlas.eval.graph.Grapher
import com.netflix.atlas.json.Json
import com.netflix.atlas.webapi.GraphApi.DataRequest
import com.typesafe.config.Config

import scala.concurrent.duration._

class ExplainApi(config: Config, implicit val actorRefFactory: ActorRefFactory) extends WebApi {

  private val grapher: Grapher = Grapher(config)

  private val dbRef = actorRefFactory.actorSelection("/user/db")

  private implicit val ec = actorRefFactory.dispatcher

  override def routes: Route = {
    endpointPath("explain" / "v1" / "graph") {
      get { ctx =>
        val graphCfg = grapher.toGraphConfig(ctx.request)
        dbRef
          .ask(ExplainRequest(DataRequest(graphCfg)))(Timeout(10.seconds))
          .map { response =>
            val json = Json.encode(response)
            val entity = HttpEntity(MediaTypes.`application/json`, json)
            RouteResult.Complete(HttpResponse(StatusCodes.OK, entity = entity))
          }
      }
    }
  }
}

object ExplainApi {
  case class ExplainRequest(dataRequest: DataRequest)
} 
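
The route above delegates to an actor registered at /user/db, which is not part of this example. A hedged sketch of the shape such an actor could take (the reply payload here is purely illustrative):

import akka.actor.Actor
import com.netflix.atlas.druid.ExplainApi.ExplainRequest

// Hypothetical database actor: receives the ExplainRequest wrapper and replies
// with some JSON-encodable description of how the DataRequest would be executed.
class ExplainDbActor extends Actor {
  override def receive: Receive = {
    case ExplainRequest(dataRequest) =>
      sender() ! Map("explain" -> dataRequest.toString)
  }
}
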
Example 139
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.stream

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
          bind(classOf[Registry]).toInstance(new NoopRegistry)
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 140
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 141
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 142
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.slotting

import com.amazonaws.services.autoscaling.AmazonAutoScaling
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.amazonaws.services.ec2.AmazonEC2
import com.google.inject.AbstractModule
import com.google.inject.Module
import com.google.inject.Provides
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.aws.AwsClientFactory
import com.netflix.iep.guice.BaseModule
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.Service
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Singleton

object Main extends StrictLogging {

  private def isLocalEnv: Boolean = !sys.env.contains("EC2_INSTANCE_ID")

  private def getBaseModules: java.util.List[Module] = {
    val modules = {
      GuiceHelper.getModulesUsingServiceLoader
    }

    if (isLocalEnv) {
      // If we are running in a local environment, provide simple versions of registry
      // and config bindings. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Registry]).toInstance(new NoopRegistry)
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }

    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new ServerModule)

      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

  class ServerModule extends BaseModule {
    override def configure(): Unit = {
      val serviceBinder = Multibinder.newSetBinder(binder(), classOf[Service])
      serviceBinder.addBinding().to(classOf[SlottingService])
    }

    @Provides
    @Singleton
    protected def providesAmazonDynamoDB(factory: AwsClientFactory): AmazonDynamoDB = {
      factory.getInstance(classOf[AmazonDynamoDB])
    }

    @Provides
    @Singleton
    protected def providesAmazonEC2(factory: AwsClientFactory): AmazonEC2 = {
      factory.getInstance(classOf[AmazonEC2])
    }

    @Provides
    @Singleton
    protected def providesAmazonAutoScaling(factory: AwsClientFactory): AmazonAutoScaling = {
      factory.getInstance(classOf[AmazonAutoScaling])
    }
  }
} 
Example 143
Source File: Util.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.slotting

import java.nio.ByteBuffer
import java.time.Duration
import java.util.concurrent.ScheduledFuture

import com.netflix.iep.NetflixEnvironment
import com.netflix.spectator.api.Registry
import com.netflix.spectator.impl.Scheduler
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging

object Util extends StrictLogging {

  def getLongOrDefault(config: Config, basePath: String): Long = {
    val env = NetflixEnvironment.accountEnv()
    val region = NetflixEnvironment.region()

    if (config.hasPath(s"$basePath.$env.$region"))
      config.getLong(s"$basePath.$env.$region")
    else
      config.getLong(s"$basePath.default")
  }

  def compress(s: String): ByteBuffer = {
    ByteBuffer.wrap(Gzip.compressString(s))
  }

  def decompress(buf: ByteBuffer): String = {
    Gzip.decompressString(toByteArray(buf))
  }

  def toByteArray(buf: ByteBuffer): Array[Byte] = {
    val bytes = new Array[Byte](buf.remaining)
    buf.get(bytes, 0, bytes.length)
    buf.clear()
    bytes
  }

  def startScheduler(
    registry: Registry,
    name: String,
    interval: Duration,
    fn: () => Unit
  ): ScheduledFuture[_] = {
    val scheduler = new Scheduler(registry, name, 2)
    val options = new Scheduler.Options()
      .withFrequency(Scheduler.Policy.FIXED_RATE_SKIP_IF_LONG, interval)
    scheduler.schedule(options, () => fn())
  }

} 
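
A short usage sketch for getLongOrDefault; the settings path and values below are made up for illustration, and the env/region branch only wins when NetflixEnvironment reports a matching account environment and region:

import com.typesafe.config.ConfigFactory

object UtilExample {

  def main(args: Array[String]): Unit = {
    // Hypothetical settings block: one default plus an override for prod/us-east-1.
    val config = ConfigFactory.parseString(
      """
        |slotting.dynamodb.read-capacity.default = 10
        |slotting.dynamodb.read-capacity.prod.us-east-1 = 50
        |""".stripMargin)

    // Returns 50 when running as prod in us-east-1, otherwise the default of 10.
    println(Util.getLongOrDefault(config, "slotting.dynamodb.read-capacity"))
  }
}
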
Example 144
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.persistence

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging

object Main extends StrictLogging {

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
          bind(classOf[Registry]).toInstance(new NoopRegistry)
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }
} 
Example 145
Source File: S3CopyService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.persistence

import java.io.File
import java.nio.file.Files
import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.KillSwitch
import akka.stream.KillSwitches
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Source
import com.netflix.atlas.core.util.Streams
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.duration._

@Singleton
class S3CopyService @Inject()(
  val config: Config,
  val registry: Registry,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {

  private val dataDir = config.getString("atlas.persistence.local-file.data-dir")

  private implicit val mat = ActorMaterializer()

  private var killSwitch: KillSwitch = _
  private val s3Config = config.getConfig("atlas.persistence.s3")

  private val cleanupTimeoutMs = s3Config.getDuration("cleanup-timeout").toMillis
  private val maxInactiveMs = s3Config.getDuration("max-inactive-duration").toMillis
  private val maxFileDurationMs =
    config.getDuration("atlas.persistence.local-file.max-duration").toMillis

  require(
    maxInactiveMs > maxFileDurationMs,
    "`max-inactive-duration` MUST be longer than `max-duration`, otherwise file may be renamed before normal write competes"
  )

  override def startImpl(): Unit = {
    logger.info("Starting service")
    killSwitch = Source
      .tick(1.second, 5.seconds, NotUsed)
      .viaMat(KillSwitches.single)(Keep.right)
      .flatMapMerge(Int.MaxValue, _ => Source(FileUtil.listFiles(new File(dataDir))))
      .toMat(new S3CopySink(s3Config, registry, system))(Keep.left)
      .run()
  }

  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    waitForCleanup()
    if (killSwitch != null) killSwitch.shutdown()
  }

  private def waitForCleanup(): Unit = {
    logger.info("Waiting for cleanup")
    val start = System.currentTimeMillis
    while (hasMoreFiles) {
      if (System.currentTimeMillis() > start + cleanupTimeoutMs) {
        logger.error("Cleanup timeout")
        return
      }
      Thread.sleep(1000)
    }
    logger.info("Cleanup done")
  }

  private def hasMoreFiles: Boolean = {
    try {
      Streams.scope(Files.list(Paths.get(dataDir))) { dir =>
        dir.anyMatch(f => Files.isRegularFile(f))
      }
    } catch {
      case e: Exception => {
        logger.error(s"Error checking hasMoreFiles in $dataDir", e)
        true // Assume there are more files on error so the caller keeps retrying
      }
    }
  }
} 
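
For reference, a hedged sketch of the configuration keys this service reads (values are illustrative only, and only the paths referenced above are shown; the real S3 sink needs additional settings):

import com.typesafe.config.ConfigFactory

// Illustrative values only; the application's reference.conf defines the real defaults.
val s3CopyExampleConfig = ConfigFactory.parseString(
  """
    |atlas.persistence {
    |  local-file.data-dir = "/tmp/atlas-persistence"
    |  local-file.max-duration = 5m
    |  s3.cleanup-timeout = 10m
    |  // must be longer than local-file.max-duration, see the require above
    |  s3.max-inactive-duration = 6m
    |}
    |""".stripMargin)
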
Example 146
Source File: LocalFilePersistService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.persistence

import akka.Done
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.RestartFlow
import akka.stream.scaladsl.Sink
import com.netflix.atlas.akka.StreamOps
import com.netflix.atlas.akka.StreamOps.SourceQueue
import com.netflix.atlas.core.model.Datapoint
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.Duration

@Singleton
class LocalFilePersistService @Inject()(
  val config: Config,
  val registry: Registry,
  // S3CopyService is actually NOT used by this service; it is injected here only to guarantee
  // that this service's shutdown callback (stopImpl) is invoked before S3CopyService's
  val s3CopyService: S3CopyService,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {
  implicit val ec = scala.concurrent.ExecutionContext.global
  implicit val mat = ActorMaterializer()

  private val queueSize = config.getInt("atlas.persistence.queue-size")

  private val fileConfig = config.getConfig("atlas.persistence.local-file")
  private val dataDir = fileConfig.getString("data-dir")
  private val maxRecords = fileConfig.getLong("max-records")
  private val maxDurationMs = fileConfig.getDuration("max-duration").toMillis
  private val maxLateDurationMs = fileConfig.getDuration("max-late-duration").toMillis
  private val rollingConf = RollingConfig(maxRecords, maxDurationMs, maxLateDurationMs)

  require(queueSize > 0)
  require(maxRecords > 0)
  require(maxDurationMs > 0)

  private var queue: SourceQueue[Datapoint] = _
  private var flowComplete: Future[Done] = _

  override def startImpl(): Unit = {
    logger.info("Starting service")
    val (q, f) = StreamOps
      .blockingQueue[Datapoint](registry, "LocalFilePersistService", queueSize)
      .via(getRollingFileFlow)
      .toMat(Sink.ignore)(Keep.both)
      .run
    queue = q
    flowComplete = f
  }

  private def getRollingFileFlow(): Flow[Datapoint, NotUsed, NotUsed] = {
    import scala.concurrent.duration._
    RestartFlow.withBackoff(
      minBackoff = 1.second,
      maxBackoff = 3.seconds,
      randomFactor = 0,
      maxRestarts = -1
    ) { () =>
      Flow.fromGraph(
        new RollingFileFlow(dataDir, rollingConf, registry)
      )
    }
  }

  // This service should stop the Akka flow when application is shutdown gracefully, and let
  // S3CopyService do the cleanup. It should trigger:
  //   1. stop taking more data points (monitor droppedQueueClosed)
  //   2. close current file writer so that last file is ready to copy to s3
  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    queue.complete()
    Await.result(flowComplete, Duration.Inf)
    logger.info("Stopped service")
  }

  def persist(dp: Datapoint): Unit = {
    queue.offer(dp)
  }
} 
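
A hedged sketch of the settings LocalFilePersistService expects (illustrative values, chosen only to satisfy the require checks above):

import com.typesafe.config.ConfigFactory

// Illustrative values only; the real defaults come from the application config.
val localFileExampleConfig = ConfigFactory.parseString(
  """
    |atlas.persistence {
    |  queue-size = 10000
    |  local-file {
    |    data-dir = "/tmp/atlas-persistence"
    |    max-records = 100000
    |    max-duration = 5m
    |    max-late-duration = 1m
    |  }
    |}
    |""".stripMargin)
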
Example 147
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.clienttest

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.Service
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private val config = ConfigFactory.load()

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple versions of registry
      // and config bindings. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Registry]).toInstance(new NoopRegistry)
          bind(classOf[Config]).toInstance(config)
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new InstrumentationModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

  class InstrumentationModule extends AbstractModule {
    override def configure(): Unit = {
      val cls = Class.forName(config.getString("netflix.iep.clienttest.class"))
      bind(classOf[MetricLibrary]).to(cls.asInstanceOf[Class[MetricLibrary]])

      val serviceBinder = Multibinder.newSetBinder(binder(), classOf[Service])
      serviceBinder.addBinding().to(classOf[InstrumentationService])
      bind(classOf[InstrumentationService])
    }
  }
} 
Example 148
Source File: InstrumentationService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.clienttest

import java.util.UUID
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import javax.inject.Inject
import javax.inject.Singleton

import com.netflix.iep.service.AbstractService
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

@Singleton
class InstrumentationService @Inject()(config: Config, metrics: MetricLibrary)
    extends AbstractService {

  private val logger = LoggerFactory.getLogger("")

  private val tagsPerMetric = config.getInt("netflix.iep.clienttest.tags-per-metric")

  private val numCounters = config.getInt("netflix.iep.clienttest.num-counters")
  private val numTimers = config.getInt("netflix.iep.clienttest.num-timers")
  private val numDistSummaries = config.getInt("netflix.iep.clienttest.num-dist-summaries")
  private val numGauges = config.getInt("netflix.iep.clienttest.num-gauges")
  private val numPolledGauges = config.getInt("netflix.iep.clienttest.num-polled-gauges")
  private val numSlowPolledGauges = config.getInt("netflix.iep.clienttest.num-slow-polled-gauges")

  // To minimize other noise in terms of memory use and computation we use the same base tag
  // set for all metrics.
  private val tagsData = (0 until tagsPerMetric).map { i =>
    val key = f"$i%05d"
    key -> UUID.randomUUID().toString
  }.toMap

  private val executor = Executors.newScheduledThreadPool(2)
  executor.scheduleWithFixedDelay(() => update(), 0L, 10, TimeUnit.SECONDS)

  // Polled sources only need to be registered once
  (0 until numPolledGauges).foreach { i =>
    metrics.poll("polledGauge", createTags(i), i.toDouble)
  }
  (0 until numSlowPolledGauges).foreach { i =>
    metrics.poll("slowPolledGauge", createTags(i), {
      Thread.sleep(120000)
      i.toDouble
    })
  }

  private def update(): Unit = {
    logger.info("update starting")
    logger.info(s"updating $numCounters counters")
    (0 until numCounters).foreach { i =>
      metrics.increment("counter", createTags(i))
    }
    logger.info(s"updating $numTimers timers")
    (0 until numTimers).foreach { i =>
      metrics.recordTime("timer", createTags(i), i)
    }
    logger.info(s"updating $numDistSummaries distribution summaries")
    (0 until numDistSummaries).foreach { i =>
      metrics.recordTime("distSummary", createTags(i), i)
    }
    logger.info(s"updating $numGauges gauges")
    (0 until numGauges).foreach { i =>
      metrics.set("gauge", createTags(i), i)
    }
    logger.info("update complete")
  }

  private def createTags(i: Int): Map[String, String] = {
    tagsData + ("id" -> i.toString)
  }

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    executor.shutdownNow()
  }
} 
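
The service reads its workload entirely from configuration. A hedged example of the keys it expects (values are illustrative):

import com.typesafe.config.ConfigFactory

// Illustrative values only.
val clientTestExampleConfig = ConfigFactory.parseString(
  """
    |netflix.iep.clienttest {
    |  tags-per-metric = 5
    |  num-counters = 1000
    |  num-timers = 1000
    |  num-dist-summaries = 1000
    |  num-gauges = 1000
    |  num-polled-gauges = 1000
    |  num-slow-polled-gauges = 1
    |}
    |""".stripMargin)
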
Example 149
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.archaius

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.google.inject.AbstractModule
import com.google.inject.Module
import com.google.inject.Provides
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.aws.AwsClientFactory
import com.netflix.iep.guice.BaseModule
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.Service
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple versions of registry
      // and config bindings. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Registry]).toInstance(new NoopRegistry)
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new ServerModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

  class ServerModule extends BaseModule {
    override def configure(): Unit = {
      val serviceBinder = Multibinder.newSetBinder(binder(), classOf[Service])
      serviceBinder.addBinding().toConstructor(getConstructor(classOf[DynamoService]))
      bind(classOf[DynamoService])
      bind(classOf[PropertiesContext])
    }

    // Visibility of protected to avoid unused method warning from scala compiler
    @Provides
    protected def providesDynamoDBClient(factory: AwsClientFactory): AmazonDynamoDB = {
      factory.newInstance(classOf[AmazonDynamoDB])
    }
  }
} 
Example 150
Source File: DynamoService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.archaius

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicLong
import javax.inject.Inject
import javax.inject.Singleton

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.netflix.iep.service.AbstractService
import com.typesafe.config.Config

import scala.concurrent.ExecutionContext
import scala.concurrent.Future


@Singleton
class DynamoService @Inject()(client: AmazonDynamoDB, config: Config) extends AbstractService {

  private val nextId = new AtomicLong()
  private val pool = Executors.newFixedThreadPool(
    Runtime.getRuntime.availableProcessors(),
    (r: Runnable) => {
      new Thread(r, s"dynamo-db-${nextId.getAndIncrement()}")
    }
  )
  private val ec = ExecutionContext.fromExecutorService(pool)

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    client match {
      case c: AmazonDynamoDBClient => c.shutdown()
      case _                       =>
    }
  }

  def execute[T](task: AmazonDynamoDB => T): Future[T] = Future(task(client))(ec)
} 
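
A hedged usage sketch for execute, which runs DynamoDB calls on the service's dedicated thread pool; the table name is made up for illustration:

import com.amazonaws.services.dynamodbv2.model.{ScanRequest, ScanResult}
import scala.concurrent.Future

// Run a table scan without blocking the caller; failures surface through the Future.
def scanTable(dynamoService: DynamoService): Future[ScanResult] =
  dynamoService.execute { client =>
    client.scan(new ScanRequest().withTableName("example-table"))
  }
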
Example 151
Source File: PropertiesLoader.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.archaius

import akka.actor.Actor
import com.amazonaws.services.dynamodbv2.model.ScanRequest
import com.netflix.atlas.json.Json
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging

import scala.util.Failure
import scala.util.Success


class PropertiesLoader(config: Config, propContext: PropertiesContext, dynamoService: DynamoService)
    extends Actor
    with StrictLogging {

  private val table = config.getString("netflix.iep.archaius.table")

  import scala.concurrent.duration._
  import scala.concurrent.ExecutionContext.Implicits.global
  context.system.scheduler.schedule(5.seconds, 5.seconds, self, PropertiesLoader.Tick)

  def receive: Receive = {
    case PropertiesLoader.Tick =>
      val future = dynamoService.execute { client =>
        val matches = List.newBuilder[PropertiesApi.Property]
        val request = new ScanRequest().withTableName(table)
        var response = client.scan(request)
        matches ++= process(response.getItems)
        while (response.getLastEvaluatedKey != null) {
          request.setExclusiveStartKey(response.getLastEvaluatedKey)
          response = client.scan(request)
          matches ++= process(response.getItems)
        }
        matches.result()
      }

      future.onComplete {
        case Success(vs) => propContext.update(vs)
        case Failure(t)  => logger.error("failed to refresh properties from dynamodb", t)
      }
  }

  private def process(items: Items): PropList = {
    import scala.jdk.CollectionConverters._
    items.asScala
      .filter(_.containsKey("data"))
      .map(_.get("data").getS)
      .map(s => Json.decode[PropertiesApi.Property](s))
      .toList
  }
}

object PropertiesLoader {
  case object Tick
} 
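
Items and PropList are not defined in this snippet; they are aliases declared elsewhere in the package. A hedged guess at their shape, inferred from how they are used here (response.getItems and the value handed to propContext.update):

import java.util.{List => JList, Map => JMap}
import com.amazonaws.services.dynamodbv2.model.AttributeValue

// Assumed aliases, not the project's verbatim definitions.
object PropertiesLoaderTypes {
  type Items = JList[JMap[String, AttributeValue]]
  type PropList = List[PropertiesApi.Property]
}
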
Example 152
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc.fwd.admin

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the
      // config binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent
      // for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 153
Source File: ExprInterpreter.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc.fwd.admin

import akka.http.scaladsl.model.Uri
import com.netflix.atlas.core.model.CustomVocabulary
import com.netflix.atlas.core.model.ModelExtractors
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.stacklang.Interpreter
import com.typesafe.config.Config
import javax.inject.Inject

class ExprInterpreter @Inject()(config: Config) {

  private val interpreter = Interpreter(new CustomVocabulary(config).allWords)

  def eval(atlasUri: String): List[StyleExpr] = {
    eval(Uri(atlasUri))
  }

  def eval(uri: Uri): List[StyleExpr] = {
    val expr = uri.query().get("q").getOrElse {
      throw new IllegalArgumentException(
        s"missing required URI parameter `q`: $uri"
      )
    }

    doEval(expr)
  }

  def doEval(expr: String): List[StyleExpr] = {
    interpreter.execute(expr).stack.map {
      case ModelExtractors.PresentationType(t) => t
    }
  }

} 
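
A hedged usage sketch; the expression below is a simple Atlas stack-language query chosen for illustration:

import com.typesafe.config.ConfigFactory

object ExprInterpreterExample {

  def main(args: Array[String]): Unit = {
    val interpreter = new ExprInterpreter(ConfigFactory.load())

    // Evaluate directly from a graph URI; eval extracts the `q` parameter.
    val exprs = interpreter.eval("/api/v1/graph?q=name,sps,:eq,:sum")
    exprs.foreach(e => println(e.toString))
  }
}
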
Example 154
Source File: ScalingPoliciesTestImpl.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc.fwd.admin

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.typesafe.config.Config

class ScalingPoliciesTestImpl(
  config: Config,
  dao: ScalingPoliciesDao,
  policies: Map[EddaEndpoint, List[ScalingPolicy]] = Map.empty[EddaEndpoint, List[ScalingPolicy]]
) extends ScalingPolicies(config, dao) {
  scalingPolicies = policies
  override def startPeriodicTimer(): Unit = {}
}

class ScalingPoliciesDaoTestImpl(
  policies: Map[EddaEndpoint, List[ScalingPolicy]]
) extends ScalingPoliciesDao {
  protected implicit val ec = scala.concurrent.ExecutionContext.global

  override def getScalingPolicies: Flow[EddaEndpoint, List[ScalingPolicy], NotUsed] = {
    Flow[EddaEndpoint]
      .map(policies(_))
  }
} 
Example 155
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.loadgen

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 156
Source File: Main.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.ses

import com.google.inject.AbstractModule
import com.google.inject.Module
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.ServiceManager
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

object Main {

  private val logger = LoggerFactory.getLogger(getClass)

  private def getBaseModules: java.util.List[Module] = {
    val modules = GuiceHelper.getModulesUsingServiceLoader
    if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
      // If we are running in a local environment provide simple version of the config
      // binding. These bindings are normally provided by the final package
      // config for the app in the production setup.
      modules.add(new AbstractModule {
        override def configure(): Unit = {
          bind(classOf[Config]).toInstance(ConfigFactory.load())
        }
      })
    }
    modules
  }

  def main(args: Array[String]): Unit = {
    try {
      val modules = getBaseModules
      modules.add(new AppModule)
      val guice = new GuiceHelper
      guice.start(modules)
      guice.getInjector.getInstance(classOf[ServiceManager])
      guice.addShutdownHook()
    } catch {
      // Send exceptions to main log file instead of wherever STDERR is sent for the process
      case t: Throwable => logger.error("fatal error on startup", t)
    }
  }

} 
Example 157
Source File: SequenceFileIOSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.fsio

import scala.concurrent.Future
import scala.util.{Success, Try}

import com.typesafe.config.Config
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec}

import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}

class SequenceFileIOSpec
  extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll with MasterHarness {

  override def beforeAll {
    startActorSystem()
  }

  override def afterAll {
    shutdownActorSystem()
  }

  override def config: Config = TestUtil.DEFAULT_CONFIG

  property("SequenceFileIO should succeed to submit application with required arguments") {
    val requiredArgs = Array(
      "-input", "/tmp/input",
      "-output", "/tmp/output"
    )
    val optionalArgs = Array(
      "-source", "1",
      "-sink", "1"
    )
    val validArgs = {
      Table(
        ("requiredArgs", "optionalArgs"),
        (requiredArgs, optionalArgs)
      )
    }
    val masterReceiver = createMockMaster()
    forAll(validArgs) { (requiredArgs: Array[String], optionalArgs: Array[String]) =>
      val args = requiredArgs ++ optionalArgs

      Future {
        SequenceFileIO.main(masterConfig, args)
      }
      masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME)
      masterReceiver.reply(SubmitApplicationResult(Success(0)))
    }

    val invalidArgs = {
      Table(
        ("requiredArgs", "optionalArgs"),
        (requiredArgs.take(0), optionalArgs),
        (requiredArgs.take(2), optionalArgs)
      )
    }
    forAll(invalidArgs) { (requiredArgs: Array[String], optionalArgs: Array[String]) =>
      val args = optionalArgs
      assert(Try(SequenceFileIO.main(args)).isFailure, "missing required arguments, print usage")
    }
  }
} 
Example 158
Source File: SOLSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.sol

import scala.concurrent.Future
import scala.util.Success

import com.typesafe.config.Config
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec}

import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}

class SOLSpec
  extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll with MasterHarness {
  override def beforeAll {
    startActorSystem()
  }

  override def afterAll {
    shutdownActorSystem()
  }

  override def config: Config = TestUtil.DEFAULT_CONFIG

  property("SOL should succeed to submit application with required arguments") {
    val requiredArgs = Array.empty[String]
    val optionalArgs = Array(
      "-streamProducer", "1",
      "-streamProcessor", "1",
      "-bytesPerMessage", "100",
      "-stages", "10")

    val args = {
      Table(
        ("requiredArgs", "optionalArgs"),
        (requiredArgs, optionalArgs)
      )
    }
    val masterReceiver = createMockMaster()
    forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) =>
      val args = requiredArgs ++ optionalArgs

      Future {
        SOL.main(masterConfig, args)
      }

      masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME)
      masterReceiver.reply(SubmitApplicationResult(Success(0)))
    }
  }
} 
Example 159
Source File: WindowAverageAppSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.state

import scala.concurrent.Future
import scala.util.Success

import com.typesafe.config.Config
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfter, Matchers, PropSpec}

import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}

class WindowAverageAppSpec
  extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness {

  before {
    startActorSystem()
  }

  after {
    shutdownActorSystem()
  }

  override def config: Config = TestUtil.DEFAULT_CONFIG

  property("WindowAverage should succeed to submit application with required arguments") {
    val requiredArgs = Array.empty[String]
    val optionalArgs = Array(
      "-gen", "2",
      "-window", "2",
      "-window_size", "5000",
      "-window_step", "5000"
    )

    val args = {
      Table(
        ("requiredArgs", "optionalArgs"),
        (requiredArgs, optionalArgs.take(0)),
        (requiredArgs, optionalArgs.take(2)),
        (requiredArgs, optionalArgs.take(4)),
        (requiredArgs, optionalArgs.take(6)),
        (requiredArgs, optionalArgs)
      )
    }
    val masterReceiver = createMockMaster()
    forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) =>
      val args = requiredArgs ++ optionalArgs

      Future {
        WindowAverageApp.main(masterConfig, args)
      }

      masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME)
      masterReceiver.reply(SubmitApplicationResult(Success(0)))
    }
  }
} 
Example 160
Source File: KafkaWordCountSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.kafka.wordcount

import scala.concurrent.Future
import scala.util.Success

import com.typesafe.config.Config
import org.scalatest.prop.PropertyChecks
import org.scalatest.{BeforeAndAfter, Matchers, PropSpec}

import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}

class KafkaWordCountSpec
  extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness {

  before {
    startActorSystem()
  }

  after {
    shutdownActorSystem()
  }

  override def config: Config = TestUtil.DEFAULT_CONFIG

  property("KafkaWordCount should succeed to submit application with required arguments") {
    val requiredArgs = Array.empty[String]
    val optionalArgs = Array(
      "-source", "1",
      "-split", "1",
      "-sum", "1",
      "-sink", "1")

    val args = {
      Table(
        ("requiredArgs", "optionalArgs"),
        (requiredArgs, optionalArgs)
      )
    }
    val masterReceiver = createMockMaster()
    forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) =>
      val args = requiredArgs ++ optionalArgs

      Future {
        KafkaWordCount.main(masterConfig, args)
      }

      masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME)
      masterReceiver.reply(SubmitApplicationResult(Success(0)))
    }
  }
} 
Example 161
Source File: DistServiceAppMaster.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.distributeservice

import java.io.File
import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList

import scala.concurrent.Future

import akka.actor.{Deploy, Props}
import akka.pattern.{ask, pipe}
import akka.remote.RemoteScope
import com.typesafe.config.Config
import org.slf4j.Logger

import org.apache.gearpump.cluster.ClientToMaster.ShutdownApplication
import org.apache.gearpump.cluster.appmaster.ExecutorSystemScheduler.{ExecutorSystemJvmConfig, ExecutorSystemStarted, StartExecutorSystemTimeout}
import org.apache.gearpump.cluster.{AppDescription, AppMasterContext, ApplicationMaster, ExecutorContext}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer, InstallService}
import org.apache.gearpump.util._

class DistServiceAppMaster(appContext: AppMasterContext, app: AppDescription)
  extends ApplicationMaster {
  import appContext._
  import context.dispatcher
  implicit val timeout = Constants.FUTURE_TIMEOUT
  private val LOG: Logger = LogUtil.getLogger(getClass, app = appId)
  private var currentExecutorId = 0
  private var workerNum: Option[Int] = None
  private var fileServerPort = -1

  val rootDirectory = new File("/")
  val host = context.system.settings.config.getString(Constants.GEARPUMP_HOSTNAME)
  val server = context.actorOf(Props(classOf[FileServer], rootDirectory, host, 0))

  override def preStart(): Unit = {
    LOG.info(s"Distribute Service AppMaster started")
    ActorUtil.launchExecutorOnEachWorker(masterProxy, getExecutorJvmConfig, self)
  }

  (server ? FileServer.GetPort).asInstanceOf[Future[FileServer.Port]] pipeTo self

  override def receive: Receive = {
    case ExecutorSystemStarted(executorSystem, _) =>
      import executorSystem.{address, resource => executorResource, worker}
      val executorContext = ExecutorContext(currentExecutorId, worker,
        appId, app.name, self, executorResource)
      // start executor
      val executor = context.actorOf(Props(classOf[DistServiceExecutor],
        executorContext, app.userConfig).withDeploy(
        Deploy(scope = RemoteScope(address))), currentExecutorId.toString)
      executorSystem.bindLifeCycleWith(executor)
      currentExecutorId += 1
      ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext)
    case WorkerList(workers) =>
      workerNum = Some(workers.length)
      ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext)
    case StartExecutorSystemTimeout =>
      LOG.error(s"Failed to allocate resource in time")
      masterProxy ! ShutdownApplication(appId)
      context.stop(self)
    case FileServer.Port(port) =>
      this.fileServerPort = port
    case GetFileContainer =>
      val name = Math.abs(new java.util.Random().nextLong()).toString
      sender ! new FileContainer(s"http://$host:$fileServerPort/$name")
    case installService: InstallService =>
      context.children.foreach(_ ! installService)
  }

  private def getExecutorJvmConfig: ExecutorSystemJvmConfig = {
    val config: Config = app.clusterConfig
    val jvmSetting = Util.resolveJvmSetting(
      config.withFallback(context.system.settings.config)).executor
    ExecutorSystemJvmConfig(jvmSetting.classPath, jvmSetting.vmargs,
      appJar, username, config)
  }
}

object DistServiceAppMaster {
  case object GetFileContainer

  case class FileContainer(url: String)

  case class InstallService(
      url: String,
      zipFileName: String,
      targetPath: String,
      script: Array[Byte],
      serviceName: String,
      serviceSettings: Map[String, Any])
} 
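
A hedged sketch of how a client could drive this app master: ask for an upload location, push the service archive to the returned URL, then broadcast InstallService to the executors. Names and values below are illustrative only.

import scala.concurrent.ExecutionContext
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer, InstallService}

def distribute(appMaster: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Unit = {
  (appMaster ? GetFileContainer).mapTo[FileContainer].foreach { container =>
    // ...upload the zip of the service to container.url here, then:
    appMaster ! InstallService(container.url, "my-service.zip", "/opt/my-service",
      Array.emptyByteArray, "my-service", Map.empty[String, Any])
  }
}
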
Example 162
Source File: DFSJarStore.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.jarstore.dfs

import java.io.{InputStream, OutputStream}
import org.apache.gearpump.util.Constants
import org.apache.gearpump.jarstore.JarStore
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import com.typesafe.config.Config
import org.apache.hadoop.fs.permission.{FsAction, FsPermission}


// The example extractor dropped the class declaration and the members that precede
// getFile; the class header and init below are an assumed, minimal reconstruction
// (other members of the original class, e.g. createFile, are still omitted).
class DFSJarStore extends JarStore {

  private var rootPath: Path = _

  override def init(config: Config): Unit = {
    rootPath = new Path(config.getString(Constants.GEARPUMP_APP_JAR_STORE_ROOT_PATH))
    createDirIfNotExists(rootPath)
  }

  override def getFile(fileName: String): InputStream = {
    val filePath = new Path(rootPath, fileName)
    val fs = filePath.getFileSystem(new Configuration())
    fs.open(filePath)
  }

  private def createDirIfNotExists(path: Path): Unit = {
    val fs = path.getFileSystem(new Configuration())
    if (!fs.exists(path)) {
      fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))
    }
  }
} 
Example 163
Source File: StaticServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.duration._
import scala.util.Try

import akka.http.scaladsl.model.headers.`Cache-Control`
import akka.http.scaladsl.testkit.{RouteTestTimeout, ScalatestRouteTest}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.util.Constants
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

class StaticServiceSpec
  extends FlatSpec with ScalatestRouteTest with Matchers with BeforeAndAfterAll {

  override def testConfig: Config = TestUtil.UI_CONFIG
  private val supervisorPath = system.settings.config.getString(
    Constants.GEARPUMP_SERVICE_SUPERVISOR_PATH)

  protected def route = new StaticService(system, supervisorPath).route

  it should "return version" in {
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Get(s"/version") ~> route) ~> check {
      val responseBody = responseAs[String]
      val config = Try(ConfigFactory.parseString(responseBody))
      assert(responseBody == "Unknown-Version")

      // By default, it will be cached.
      assert(header[`Cache-Control`].isEmpty)
    }
  }

  it should "get correct supervisor path" in {
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Get(s"/supervisor-actor-path") ~> route) ~> check {
      val responseBody = responseAs[String]
      val defaultSupervisorPath = ""
      assert(responseBody == defaultSupervisorPath)
    }
  }
} 
Example 164
Source File: AdminServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.http.scaladsl.testkit.{RouteTestTimeout, ScalatestRouteTest}
import com.typesafe.config.Config
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil

// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

class AdminServiceSpec
  extends FlatSpec with ScalatestRouteTest with Matchers with BeforeAndAfterAll {

  override def testConfig: Config = TestUtil.DEFAULT_CONFIG

  implicit def actorSystem: ActorSystem = system

  it should "shutdown the ActorSystem when receiving terminate" in {
    val route = new AdminService(actorSystem).route
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Post(s"/terminate") ~> route) ~> check {
      assert(status.intValue() == 404)
    }

    Await.result(actorSystem.whenTerminated, 20.seconds)

    // terminate should terminate current actor system
    assert(actorSystem.whenTerminated.isCompleted)
  }
} 
Example 165
Source File: WorkerServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.duration._
import scala.util.{Success, Try}

import akka.actor.ActorRef
import akka.http.scaladsl.model.headers.`Cache-Control`
import akka.http.scaladsl.testkit.{RouteTestTimeout, ScalatestRouteTest}
import akka.testkit.TestActor.{AutoPilot, KeepRunning}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.ClientToMaster.{QueryHistoryMetrics, QueryWorkerConfig, ResolveWorkerId}
import org.apache.gearpump.cluster.MasterToClient.{HistoryMetrics, HistoryMetricsItem, ResolveWorkerIdResult, WorkerConfig}
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.worker.{WorkerId, WorkerSummary}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

class WorkerServiceSpec
  extends FlatSpec with ScalatestRouteTest with Matchers with BeforeAndAfterAll {

  override def testConfig: Config = TestUtil.DEFAULT_CONFIG

  protected def actorRefFactory = system

  val mockWorker = TestProbe()

  protected def master = mockMaster.ref

  protected def workerRoute = new WorkerService(master, system).route

  mockWorker.setAutoPilot {
    new AutoPilot {
      def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
        case GetWorkerData(workerId) =>
          sender ! WorkerData(WorkerSummary.empty)
          KeepRunning
        case QueryWorkerConfig(workerId) =>
          sender ! WorkerConfig(null)
          KeepRunning
        case QueryHistoryMetrics(path, _, _, _) =>
          sender ! HistoryMetrics(path, List.empty[HistoryMetricsItem])
          KeepRunning
      }
    }
  }

  val mockMaster = TestProbe()
  mockMaster.setAutoPilot {
    new AutoPilot {
      def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
        case ResolveWorkerId(workerId) =>
          sender ! ResolveWorkerIdResult(Success(mockWorker.ref))
          KeepRunning
      }
    }
  }

  "ConfigQueryService" should "return config for worker" in {
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Get(s"/api/$REST_VERSION/worker/${WorkerId.render(WorkerId(0, 0L))}/config")
      ~> workerRoute) ~> check {
      val responseBody = responseAs[String]
      val config = Try(ConfigFactory.parseString(responseBody))
      assert(config.isSuccess)
    }
  }

  it should "return WorkerData" in {
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Get(s"/api/$REST_VERSION/worker/${WorkerId.render(WorkerId(1, 0L))}")
      ~> workerRoute) ~> check {
      val responseBody = responseAs[String]
      val config = Try(ConfigFactory.parseString(responseBody))
      assert(config.isSuccess)

      // Check the header, should contains no-cache header.
      // Cache-Control:no-cache, max-age=0
      val noCache = header[`Cache-Control`].get.value()
      assert(noCache == "no-cache, max-age=0")
    }
  }

  "MetricsQueryService" should "return history metrics" in {
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Get(s"/api/$REST_VERSION/worker/${WorkerId.render(WorkerId(0, 0L))}/metrics/worker")
      ~> workerRoute) ~> check {
      val responseBody = responseAs[String]
      val config = Try(ConfigFactory.parseString(responseBody))
      assert(config.isSuccess)
    }
  }

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 166
Source File: CGroupProcessLauncher.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.worker

import java.io.File
import scala.sys.process.Process

import com.typesafe.config.Config
import org.slf4j.{Logger, LoggerFactory}

import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.util.{ProcessLogRedirector, RichProcess}


class CGroupProcessLauncher(val config: Config) extends ExecutorProcessLauncher {
  private val APP_MASTER = -1
  private val cgroupManager: Option[CGroupManager] = CGroupManager.getInstance(config)
  private val LOG: Logger = LoggerFactory.getLogger(getClass)

  override def cleanProcess(appId: Int, executorId: Int): Unit = {
    if (executorId != APP_MASTER) {
      cgroupManager.foreach(_.shutDownExecutor(appId, executorId))
    }
  }

  override def createProcess(
      appId: Int, executorId: Int, resource: Resource, appConfig: Config, options: Array[String],
      classPath: Array[String], mainClass: String, arguments: Array[String]): RichProcess = {
    val cgroupCommand = if (executorId != APP_MASTER) {
      cgroupManager.map(_.startNewExecutor(appConfig, resource.slots, appId,
        executorId)).getOrElse(List.empty)
    } else List.empty
    LOG.info(s"Launch executor $executorId with CGroup ${cgroupCommand.mkString(" ")}, " +
      s"classpath: ${classPath.mkString(File.pathSeparator)}")

    val java = System.getProperty("java.home") + "/bin/java"
    val command = cgroupCommand ++ List(java) ++ options ++ List("-cp", classPath
      .mkString(File.pathSeparator), mainClass) ++ arguments
    LOG.info(s"Starting executor process java $mainClass ${arguments.mkString(" ")}; " +
      s"options: ${options.mkString(" ")}")
    val logger = new ProcessLogRedirector()
    val process = Process(command).run(logger)
    new RichProcess(process, logger)
  }
} 
Example 167
Source File: NMClient.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.yarn.glue

import java.nio.ByteBuffer

import akka.actor.ActorRef
import com.typesafe.config.Config
import org.apache.gearpump.experiments.yarn.appmaster.YarnAppMaster.ContainerStarted
import org.apache.gearpump.experiments.yarn.glue.Records._
import org.apache.gearpump.util.LogUtil
import org.apache.hadoop.yarn.api.records.{ContainerId => YarnContainerId, ContainerStatus => YarnContainerStatus}
import org.apache.hadoop.yarn.client.api.async.NMClientAsync
import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl

class NMClient(yarnConf: YarnConfig, config: Config) extends NMClientAsync.CallbackHandler {

  private val LOG = LogUtil.getLogger(getClass)

  private var reportTo: ActorRef = null
  private var client: NMClientAsyncImpl = null

  def start(reportTo: ActorRef): Unit = {
    LOG.info("Starting Node Manager Client NMClient...")
    this.reportTo = reportTo
    client = new NMClientAsyncImpl(this)
    client.init(yarnConf.conf)
    client.start()
  }

  private[glue]
  override def onContainerStarted(
      containerId: YarnContainerId, allServiceResponse: java.util.Map[String, ByteBuffer]) {
    LOG.info(s"Container started : $containerId, " + allServiceResponse)
    reportTo ! ContainerStarted(containerId)
  }

  private[glue]
  override def onContainerStatusReceived(
      containerId: YarnContainerId, containerStatus: YarnContainerStatus) {
    LOG.info(s"Container status received : $containerId, status $containerStatus")
  }

  private[glue]
  override def onContainerStopped(containerId: YarnContainerId) {
    LOG.error(s"Container stopped : $containerId")
  }

  private[glue]
  override def onGetContainerStatusError(containerId: YarnContainerId, throwable: Throwable) {
    LOG.error(s"Container exception : $containerId", throwable)
  }

  private[glue]
  override def onStartContainerError(containerId: YarnContainerId, throwable: Throwable) {
    LOG.error(s"Container exception : $containerId", throwable)
  }

  private[glue]
  override def onStopContainerError(containerId: YarnContainerId, throwable: Throwable) {
    LOG.error(s"Container exception : $containerId", throwable)
  }

  def launchCommand(
      container: Container, command: String, packagePath: String, configPath: String): Unit = {
    LOG.info(s"Launching command : $command on container" +
      s":  ${container.getId}, host ip : ${container.getNodeId.getHost}")
    val context = ContainerLaunchContext(yarnConf.conf, command, packagePath, configPath)
    client.startContainerAsync(container, context)
  }

  def stopContainer(containerId: ContainerId, nodeId: NodeId): Unit = {
    LOG.info(s"Stop container ${containerId.toString} on node: ${nodeId.toString} ")
    client.stopContainerAsync(containerId, nodeId)
  }

  def stop(): Unit = {
    LOG.info(s"Shutdown NMClient")
    client.stop()
  }
} 
Example 168
Source File: TaskLocator.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.appmaster

import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions, ConfigValueFactory}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.streaming.appmaster.TaskLocator.{Localities, Locality, NonLocality, WorkerLocality}
import org.apache.gearpump.streaming.task.TaskId

import scala.collection.JavaConverters._
import scala.util.Try


object TaskLocator {

  case class Localities(localities: Map[WorkerId, Array[TaskId]])

  object Localities {
    val pattern = "task_([0-9]+)_([0-9]+)".r

    // To avoid polluting the classpath, we do the JSON translation ourselves instead of
    // introducing a JSON library dependency directly.
    def fromJson(json: String): Localities = {
      val localities = ConfigFactory.parseString(json).getAnyRef("localities")
        .asInstanceOf[java.util.Map[String, String]].asScala.map { pair =>
        val workerId: WorkerId = WorkerId.parse(pair._1)
        val tasks = pair._2.split(",").map { task =>
          val pattern(processorId, taskIndex) = task
          TaskId(processorId.toInt, taskIndex.toInt)
        }
        (workerId, tasks)
      }.toMap
      new Localities(localities)
    }

    def toJson(localities: Localities): String = {
      val map = localities.localities.toList.map { pair =>
        (WorkerId.render(pair._1), pair._2.map(task =>
          s"task_${task.processorId}_${task.index}").mkString(","))
      }.toMap.asJava
      ConfigFactory.empty().withValue("localities", ConfigValueFactory.fromAnyRef(map)).
        root.render(ConfigRenderOptions.concise())
    }
  }
} 
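
The Localities helpers above round-trip a worker-to-task assignment through a Config-rendered JSON string. A minimal usage sketch, not part of the original file, assuming the object above is in scope:

val assignment = Localities(Map(
  WorkerId(0, 0L) -> Array(TaskId(0, 0), TaskId(0, 1))))

val json: String = Localities.toJson(assignment)        // keys rendered via WorkerId.render
val restored: Localities = Localities.fromJson(json)    // parsed back with WorkerId.parse
// restored.localities(WorkerId(0, 0L)) contains TaskId(0, 0) and TaskId(0, 1)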
Example 169
Source File: ConfigFileBasedAuthenticator.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.security

import scala.concurrent.{ExecutionContext, Future}

import com.typesafe.config.Config

import org.apache.gearpump.security.Authenticator.AuthenticationResult
import org.apache.gearpump.security.ConfigFileBasedAuthenticator._

object ConfigFileBasedAuthenticator {

  private val ROOT = "gearpump.ui-security.config-file-based-authenticator"
  private val ADMINS = ROOT + "." + "admins"
  private val USERS = ROOT + "." + "users"
  private val GUESTS = ROOT + "." + "guests"

  private case class Credentials(
      admins: Map[String, String], users: Map[String, String], guests: Map[String, String]) {

    def verify(user: String, password: String): AuthenticationResult = {
      if (admins.contains(user)) {
        if (verify(user, password, admins)) {
          Authenticator.Admin
        } else {
          Authenticator.UnAuthenticated
        }
      } else if (users.contains(user)) {
        if (verify(user, password, users)) {
          Authenticator.User
        } else {
          Authenticator.UnAuthenticated
        }
      } else if (guests.contains(user)) {
        if (verify(user, password, guests)) {
          Authenticator.Guest
        } else {
          Authenticator.UnAuthenticated
        }
      } else {
        Authenticator.UnAuthenticated
      }
    }

    private def verify(user: String, password: String, map: Map[String, String]): Boolean = {
      val storedPass = map(user)
      PasswordUtil.verify(password, storedPass)
    }
  }
}


class ConfigFileBasedAuthenticator(config: Config) extends Authenticator {

  private val credentials = loadCredentials(config)

  override def authenticate(user: String, password: String, ec: ExecutionContext)
    : Future[AuthenticationResult] = {
    implicit val ctx = ec
    Future {
      credentials.verify(user, password)
    }
  }

  private def loadCredentials(config: Config): Credentials = {
    val admins = configToMap(config, ADMINS)
    val users = configToMap(config, USERS)
    val guests = configToMap(config, GUESTS)
    new Credentials(admins, users, guests)
  }

  private def configToMap(config: Config, path: String) = {
    import scala.collection.JavaConverters._
    config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString }
  }
} 
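
The credential maps are read from the three paths defined in the companion object. A hedged configuration and call-site sketch (the user names and stored digests are placeholders; the stored values are whatever PasswordUtil.verify expects):

import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext

val conf = ConfigFactory.parseString(
  """gearpump.ui-security.config-file-based-authenticator {
    |  admins = { "admin" : "<admin-password-digest>" }
    |  users  = { "alice" : "<alice-password-digest>" }
    |  guests = { "guest" : "<guest-password-digest>" }
    |}""".stripMargin)

val authenticator = new ConfigFileBasedAuthenticator(conf)
// Resolves to Authenticator.Admin / User / Guest / UnAuthenticated.
val result = authenticator.authenticate("alice", "alice-password", ExecutionContext.global)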
Example 170
Source File: NettyConfig.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.transport.netty

import com.typesafe.config.Config

import org.apache.gearpump.util.Constants

class NettyConfig(conf: Config) {

  val buffer_size = conf.getInt(Constants.NETTY_BUFFER_SIZE)
  val max_retries = conf.getInt(Constants.NETTY_MAX_RETRIES)
  val base_sleep_ms = conf.getInt(Constants.NETTY_BASE_SLEEP_MS)
  val max_sleep_ms = conf.getInt(Constants.NETTY_MAX_SLEEP_MS)
  val messageBatchSize = conf.getInt(Constants.NETTY_MESSAGE_BATCH_SIZE)
  val flushCheckInterval = conf.getInt(Constants.NETTY_FLUSH_CHECK_INTERVAL)

  def newTransportSerializer: ITransportMessageSerializer = {
    Class.forName(
      conf.getString(Constants.GEARPUMP_TRANSPORT_SERIALIZER))
      .newInstance().asInstanceOf[ITransportMessageSerializer]
  }
} 
Example 171
Source File: Context.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.transport.netty

import java.io.Closeable
import java.util.concurrent._

import scala.collection.JavaConverters._

import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.Config
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.slf4j.Logger

import org.apache.gearpump.transport.netty.Server.ServerPipelineFactory
import org.apache.gearpump.transport.{ActorLookupById, HostPort}
import org.apache.gearpump.util.{Constants, LogUtil}

object Context {
  private final val LOG: Logger = LogUtil.getLogger(getClass)
}


  def close(): Unit = {

    LOG.info(s"Context.term, cleanup resources...., " +
      s"we have ${closeHandler.size()} items to close...")

    // Cleans up resource in reverse order so that client actor can be cleaned
    // before clientChannelFactory
    closeHandler.iterator().asScala.toList.reverse.foreach(_.close())
  }
} 
Example 172
Source File: GearpumpSerialization.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.serializer

import com.esotericsoftware.kryo.{Kryo, Serializer => KryoSerializer}
import com.typesafe.config.Config
import org.slf4j.Logger
import org.apache.gearpump.util.{Constants, LogUtil}

class GearpumpSerialization(config: Config) {

  private val LOG: Logger = LogUtil.getLogger(getClass)

  def customize(kryo: Kryo): Unit = {

    val serializationMap = configToMap(config, Constants.GEARPUMP_SERIALIZERS)

    serializationMap.foreach { kv =>
      val (key, value) = kv
      val keyClass = Class.forName(key)

      if (value == null || value.isEmpty) {

        // Use default serializer for this class type
        kryo.register(keyClass)
      } else {
        val valueClass = Class.forName(value)
        val register = kryo.register(keyClass,
          valueClass.newInstance().asInstanceOf[KryoSerializer[_]])
        LOG.debug(s"Registering ${keyClass}, id: ${register.getId}")
      }
    }
    kryo.setReferences(false)

    // Requires the user to register the class first before using
    kryo.setRegistrationRequired(true)
  }

  private final def configToMap(config: Config, path: String) = {
    import scala.collection.JavaConverters._
    config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString }
  }
} 
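
GearpumpSerialization reads a class-to-serializer map from configuration and registers each entry on a Kryo instance. A hedged sketch of driving it directly; it assumes Constants.GEARPUMP_SERIALIZERS resolves to the path "gearpump.serializers", and an empty value means "use Kryo's default serializer for that class":

import com.esotericsoftware.kryo.Kryo
import com.typesafe.config.ConfigFactory

// Sketch only; the config path is an assumption about Constants.GEARPUMP_SERIALIZERS.
val config = ConfigFactory.parseString(
  """gearpump.serializers {
    |  "scala.Tuple2" = ""
    |}""".stripMargin)

val kryo = new Kryo()
new GearpumpSerialization(config).customize(kryo)
// With registration required, unregistered classes now fail fast at serialization time.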
Example 173
Source File: EmbeddedRuntimeEnvironment.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.embedded

import com.typesafe.config.Config
import org.apache.gearpump.cluster.client.{ClientContext, RuntimeEnvironment}
import org.apache.gearpump.cluster.embedded.EmbeddedRuntimeEnvironment.EmbeddedClientContext


class EmbeddedRuntimeEnvironment extends RuntimeEnvironment {
  override def newClientContext(akkaConf: Config): ClientContext = {
    new EmbeddedClientContext(akkaConf)
  }
}

object EmbeddedRuntimeEnvironment {
  class EmbeddedClientContext private(cluster: EmbeddedCluster)
    extends ClientContext(cluster.config, cluster.system, cluster.master) {

    def this(akkaConf: Config) {
      this(new EmbeddedCluster(akkaConf))
    }

    override def close(): Unit = {
      super.close()
      cluster.stop()
    }
  }
} 
Example 174
Source File: EmbeddedCluster.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.embedded

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.{Config, ConfigValueFactory}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.{Master => MasterActor}
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.util.Constants.{GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS, GEARPUMP_CLUSTER_MASTERS, GEARPUMP_METRIC_ENABLED, MASTER}
import org.apache.gearpump.util.{LogUtil, Util}


class EmbeddedCluster(inputConfig: Config) {
  private val LOG = LogUtil.getLogger(getClass)
  private val workerCount: Int = 1
  private val port = Util.findFreePort().get
  private[embedded] val config: Config = getConfig(inputConfig, port)
  private[embedded] val system: ActorSystem = ActorSystem(MASTER, config)
  private[embedded] val master: ActorRef = system.actorOf(Props[MasterActor], MASTER)

  0.until(workerCount).foreach { id =>
    system.actorOf(Props(classOf[WorkerActor], master), classOf[WorkerActor].getSimpleName + id)
  }

  LOG.info("=================================")
  LOG.info("Local Cluster is started at: ")
  LOG.info(s"                 127.0.0.1:$port")
  LOG.info(s"To see UI, run command: services -master 127.0.0.1:$port")

  private def getConfig(inputConfig: Config, port: Int): Config = {
    val config = inputConfig.
      withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(port)).
      withValue(GEARPUMP_CLUSTER_MASTERS,
        ConfigValueFactory.fromIterable(List(s"127.0.0.1:$port").asJava)).
      withValue(GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS,
        ConfigValueFactory.fromAnyRef(true)).
      withValue(GEARPUMP_METRIC_ENABLED, ConfigValueFactory.fromAnyRef(true)).
      withValue("akka.actor.provider",
        ConfigValueFactory.fromAnyRef("akka.cluster.ClusterActorRefProvider"))
    config
  }

  def stop(): Unit = {
    system.stop(master)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object EmbeddedCluster {
  def apply(): EmbeddedCluster = {
    new EmbeddedCluster(ClusterConfig.master())
  }
} 
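
EmbeddedCluster is the in-process variant used by the embedded runtime: one master and a single worker started inside the calling JVM. A minimal usage sketch, not part of the original file:

// Start a master plus one worker in this JVM, backed by ClusterConfig.master().
val cluster = EmbeddedCluster()
// ... run applications against the embedded master ...
cluster.stop()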
Example 175
Source File: ClusterConfigSource.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster

import java.io.File
import scala.language.implicitConversions

import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions}


trait ClusterConfigSource {
  def getConfig: Config
}

object ClusterConfigSource {

  def apply(filePath: String): ClusterConfigSource = {

    if (null == filePath) {
      new ClusterConfigSourceImpl(ConfigFactory.empty())
    } else {
      var config = ConfigFactory.parseFileAnySyntax(new File(filePath),
        ConfigParseOptions.defaults.setAllowMissing(true))

      if (null == config || config.isEmpty) {
        config = ConfigFactory.parseResourcesAnySyntax(filePath,
          ConfigParseOptions.defaults.setAllowMissing(true))
      }
      new ClusterConfigSourceImpl(config)
    }
  }

  implicit def FilePathToClusterConfigSource(filePath: String): ClusterConfigSource = {
    apply(filePath)
  }

  private class ClusterConfigSourceImpl(config: Config) extends ClusterConfigSource {
    override def getConfig: Config = config
  }
} 
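
Because of the implicit conversion above, any file path (or classpath resource name) can stand in for a ClusterConfigSource, and a missing file simply produces an empty Config. A short usage sketch with a hypothetical path:

// "conf/gear.conf" is a hypothetical path; if it cannot be found, getConfig returns an empty Config.
val source: ClusterConfigSource = "conf/gear.conf"
val config: Config = source.getConfig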
Example 176
Source File: ApplicationRuntimeInfo.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.appmaster

import akka.actor.ActorRef
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.gearpump.Time.MilliSeconds
import org.apache.gearpump.cluster.{ApplicationStatus, ApplicationTerminalStatus}


case class ApplicationRuntimeInfo(
    appId: Int,
    // AppName is the unique Id for an application
    appName: String,
    appMaster: ActorRef = ActorRef.noSender,
    worker: ActorRef = ActorRef.noSender,
    user: String = "",
    submissionTime: MilliSeconds = 0,
    startTime: MilliSeconds = 0,
    finishTime: MilliSeconds = 0,
    config: Config = ConfigFactory.empty(),
    status: ApplicationStatus = ApplicationStatus.NONEXIST) {

  def onAppMasterRegistered(appMaster: ActorRef, worker: ActorRef): ApplicationRuntimeInfo = {
    this.copy(appMaster = appMaster, worker = worker)
  }

  def onAppMasterActivated(timeStamp: MilliSeconds): ApplicationRuntimeInfo = {
    this.copy(startTime = timeStamp, status = ApplicationStatus.ACTIVE)
  }

  def onFinalStatus(timeStamp: MilliSeconds, finalStatus: ApplicationTerminalStatus):
    ApplicationRuntimeInfo = {
    this.copy(finishTime = timeStamp, status = finalStatus)
  }
} 
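
ApplicationRuntimeInfo is an immutable record, so each lifecycle event returns an updated copy rather than mutating state. A hedged sketch of the transitions (appMasterRef and workerRef are hypothetical ActorRefs):

val info = ApplicationRuntimeInfo(appId = 1, appName = "wordcount")
val registered = info.onAppMasterRegistered(appMasterRef, workerRef)
val active     = registered.onAppMasterActivated(System.currentTimeMillis())
// onFinalStatus(timestamp, status) later records an ApplicationTerminalStatus and the finish time.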
Example 177
Source File: RuntimeEnvironment.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.client

import com.typesafe.config.Config
import org.apache.gearpump.cluster.client.RuntimeEnvironment.RemoteClientContext
import org.apache.gearpump.cluster.embedded.EmbeddedRuntimeEnvironment


class RemoteRuntimeEnvironment extends RuntimeEnvironment {
  override def newClientContext(akkaConf: Config): ClientContext = {
    new RemoteClientContext(akkaConf)
  }
}

object RuntimeEnvironment {
  private var envInstance: RuntimeEnvironment = _

  class RemoteClientContext(akkaConf: Config) extends ClientContext(akkaConf, null, null)

  def get() : RuntimeEnvironment = {
    Option(envInstance).getOrElse(new EmbeddedRuntimeEnvironment)
  }

  def newClientContext(akkaConf: Config): ClientContext = {
    get().newClientContext(akkaConf)
  }

  def setRuntimeEnv(env: RuntimeEnvironment): Unit = {
    envInstance = env
  }
} 
Example 178
Source File: Util.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.util

import java.io.{BufferedReader, File, FileInputStream, InputStreamReader}
import java.net.{ServerSocket, URI}
import scala.concurrent.forkjoin.ThreadLocalRandom
import scala.sys.process.Process
import scala.util.{Failure, Success, Try}

import com.typesafe.config.{Config, ConfigFactory}

import org.apache.gearpump.cluster.AppJar
import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.transport.HostPort

object Util {
  val LOG = LogUtil.getLogger(getClass)
  private val defaultUri = new URI("file:///")
  private val appNamePattern = "^[a-zA-Z_][a-zA-Z0-9_]+$".r.pattern

  def validApplicationName(appName: String): Boolean = {
    appNamePattern.matcher(appName).matches()
  }

  def getCurrentClassPath: Array[String] = {
    val classpath = System.getProperty("java.class.path")
    val classpathList = classpath.split(File.pathSeparator)
    classpathList
  }

  def version: String = {
    val home = System.getProperty(Constants.GEARPUMP_HOME)
    val version = Try {
      val versionFile = new FileInputStream(new File(home, "VERSION"))
      val reader = new BufferedReader(new InputStreamReader(versionFile))
      val version = reader.readLine().replace("version:=", "")
      versionFile.close()
      version
    }
    version match {
      case Success(version) =>
        version
      case Failure(ex) =>
        LOG.error("failed to read VERSION file, " + ex.getMessage)
        "Unknown-Version"
    }
  }

  def startProcess(options: Array[String], classPath: Array[String], mainClass: String,
      arguments: Array[String]): RichProcess = {
    val java = System.getProperty("java.home") + "/bin/java"

    val command = List(java) ++ options ++
      List("-cp", classPath.mkString(File.pathSeparator), mainClass) ++ arguments
    LOG.info(s"Starting executor process java $mainClass ${arguments.mkString(" ")} " +
      s"\n ${options.mkString(" ")}")
    val logger = new ProcessLogRedirector()
    val process = Process(command).run(logger)
    new RichProcess(process, logger)
  }

  
  def resolveJvmSetting(conf: Config): AppJvmSettings = {

    import org.apache.gearpump.util.Constants._

    val appMasterVMArgs = Try(conf.getString(GEARPUMP_APPMASTER_ARGS).split("\\s+")
      .filter(_.nonEmpty)).toOption
    val executorVMArgs = Try(conf.getString(GEARPUMP_EXECUTOR_ARGS).split("\\s+")
      .filter(_.nonEmpty)).toOption

    val appMasterClassPath = Try(
      conf.getString(GEARPUMP_APPMASTER_EXTRA_CLASSPATH)
        .split("[;:]").filter(_.nonEmpty)).toOption

    val executorClassPath = Try(
      conf.getString(GEARPUMP_EXECUTOR_EXTRA_CLASSPATH)
        .split(File.pathSeparator).filter(_.nonEmpty)).toOption

    AppJvmSettings(
      JvmSetting(appMasterVMArgs.getOrElse(Array.empty[String]),
        appMasterClassPath.getOrElse(Array.empty[String])),
      JvmSetting(executorVMArgs
        .getOrElse(Array.empty[String]), executorClassPath.getOrElse(Array.empty[String])))
  }

  def asSubDirOfGearpumpHome(dir: String): File = {
    new File(System.getProperty(Constants.GEARPUMP_HOME), dir)

  }
} 
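
validApplicationName is a pure check against the pattern defined above, which makes its behaviour easy to illustrate:

Util.validApplicationName("wordcount_01")   // true: matches ^[a-zA-Z_][a-zA-Z0-9_]+$
Util.validApplicationName("1wordcount")     // false: must not start with a digit
Util.validApplicationName("word-count")     // false: '-' is not in the allowed character set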
Example 179
Source File: LocalJarStore.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.jarstore.local

import java.io._

import com.typesafe.config.Config
import org.apache.gearpump.jarstore.JarStore
import org.apache.gearpump.util.{Constants, FileUtils, LogUtil, Util}
import org.slf4j.Logger


  override def getFile(fileName: String): InputStream = {
    val localFile = new File(rootPath, fileName)
    val is = try {
      new FileInputStream(localFile)
    } catch {
      case ex: Exception =>
        LOG.error(s"Fetch file $fileName failed", ex)
        new ClosedInputStream
    }
    is
  }

  private def createDirIfNotExists(file: File): Unit = {
    if (!file.exists()) {
      FileUtils.forceMkdir(file)
    }
  }
} 
Example 180
Source File: JarStoreClient.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.jarstore

import java.io.File
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.Await

import akka.pattern.ask
import akka.actor.{ActorSystem, ActorRef}
import com.typesafe.config.Config
import org.apache.gearpump.cluster.master.MasterProxy
import org.apache.gearpump.util.{Util, Constants, LogUtil}
import org.slf4j.Logger

import org.apache.gearpump.cluster.ClientToMaster.{GetJarStoreServer, JarStoreServerAddress}
import scala.concurrent.{Future, ExecutionContext}

class JarStoreClient(config: Config, system: ActorSystem) {
  private def LOG: Logger = LogUtil.getLogger(getClass)
  private implicit val timeout = Constants.FUTURE_TIMEOUT
  private implicit def dispatcher: ExecutionContext = system.dispatcher

  private val master: ActorRef = {
    val masters = config.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS)
      .asScala.flatMap(Util.parseHostList)
    system.actorOf(MasterProxy.props(masters), s"masterproxy${Util.randInt()}")
  }

  private lazy val client = (master ? GetJarStoreServer).asInstanceOf[Future[JarStoreServerAddress]]
    .map { address =>
      val client = new FileServer.Client(system, address.url)
      client
    }

  
  def copyFromLocal(localFile: File): FilePath = {
    val future = client.flatMap(_.upload(localFile))
    Await.result(future, Duration(60, TimeUnit.SECONDS))
  }
} 
Example 181
Source File: MasterWatcherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.Config
import org.apache.gearpump.cluster.TestUtil
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._

class MasterWatcherSpec extends FlatSpec with Matchers {
  def config: Config = TestUtil.MASTER_CONFIG

  "MasterWatcher" should "kill itself when can not get a quorum" in {
    val system = ActorSystem("ForMasterWatcher", config)

    val actorWatcher = TestProbe()(system)

    val masterWatcher = system.actorOf(Props(classOf[MasterWatcher], "watcher"))
    actorWatcher watch masterWatcher
    actorWatcher.expectTerminated(masterWatcher, 5.seconds)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 182
Source File: InMemoryKVServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.appmaster

import akka.actor.Props
import akka.testkit.TestProbe
import com.typesafe.config.Config
import org.apache.gearpump.cluster.master.InMemoryKVService
import org.apache.gearpump.cluster.master.InMemoryKVService._
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}

import scala.concurrent.duration._

class InMemoryKVServiceSpec
  extends FlatSpec with Matchers with BeforeAndAfterEach with MasterHarness {

  override def beforeEach(): Unit = {
    startActorSystem()
  }

  override def afterEach(): Unit = {
    shutdownActorSystem()
  }

  override def config: Config = TestUtil.MASTER_CONFIG

  "KVService" should "get, put, delete correctly" in {
    val system = getActorSystem
    val kvService = system.actorOf(Props(new InMemoryKVService()))
    val group = "group"

    val client = TestProbe()(system)

    client.send(kvService, PutKV(group, "key", 1))
    client.expectMsg(PutKVSuccess)

    client.send(kvService, PutKV(group, "key", 2))
    client.expectMsg(PutKVSuccess)

    client.send(kvService, GetKV(group, "key"))
    client.expectMsg(GetKVSuccess("key", 2))

    client.send(kvService, DeleteKVGroup(group))

    // After DeleteKVGroup, the service no longer accepts Get and Put messages for this group.
    client.send(kvService, GetKV(group, "key"))
    client.expectNoMsg(3.seconds)

    client.send(kvService, PutKV(group, "key", 3))
    client.expectNoMsg(3.seconds)
  }
} 
Example 183
Source File: AppMasterLauncherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.master

import org.apache.gearpump.cluster.worker.WorkerId

import scala.util.Success

import akka.actor._
import akka.testkit.TestProbe
import com.typesafe.config.Config
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers}

import org.apache.gearpump.cluster.AppMasterToMaster.RequestResource
import org.apache.gearpump.cluster.AppMasterToWorker.{LaunchExecutor, ShutdownExecutor}
import org.apache.gearpump.cluster.MasterToAppMaster.ResourceAllocated
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.WorkerToAppMaster.ExecutorLaunchRejected
import org.apache.gearpump.cluster.scheduler.{Resource, ResourceAllocation, ResourceRequest}
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}
import org.apache.gearpump.util.ActorSystemBooter._

class AppMasterLauncherSpec extends FlatSpec with Matchers
  with BeforeAndAfterEach with MasterHarness {

  override def config: Config = TestUtil.DEFAULT_CONFIG

  val appId = 1
  val executorId = 2
  var master: TestProbe = null
  var client: TestProbe = null
  var worker: TestProbe = null
  var watcher: TestProbe = null
  var appMasterLauncher: ActorRef = null

  override def beforeEach(): Unit = {
    startActorSystem()
    master = createMockMaster()
    client = TestProbe()(getActorSystem)
    worker = TestProbe()(getActorSystem)
    watcher = TestProbe()(getActorSystem)
    appMasterLauncher = getActorSystem.actorOf(AppMasterLauncher.props(appId, executorId,
      TestUtil.dummyApp, None, "username", master.ref, Some(client.ref)))
    watcher watch appMasterLauncher
    master.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), WorkerId.unspecified)))
    val resource = ResourceAllocated(
      Array(ResourceAllocation(Resource(1), worker.ref, WorkerId(0, 0L))))
    master.reply(resource)
    worker.expectMsgType[LaunchExecutor]
  }

  override def afterEach(): Unit = {
    shutdownActorSystem()
  }

  "AppMasterLauncher" should "launch appmaster correctly" in {
    worker.reply(RegisterActorSystem("systempath"))
    worker.expectMsgType[ActorSystemRegistered]

    worker.expectMsgType[CreateActor]
    worker.reply(ActorCreated(master.ref, "appmaster"))

    client.expectMsg(SubmitApplicationResult(Success(appId)))
    watcher.expectTerminated(appMasterLauncher)
  }

  "AppMasterLauncher" should "reallocate resource if executor launch rejected" in {
    worker.reply(ExecutorLaunchRejected(""))
    master.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), WorkerId.unspecified)))

    val resource = ResourceAllocated(
      Array(ResourceAllocation(Resource(1), worker.ref, WorkerId(0, 0L))))
    master.reply(resource)
    worker.expectMsgType[LaunchExecutor]

    worker.reply(RegisterActorSystem("systempath"))
    worker.expectMsgType[ActorSystemRegistered]

    worker.expectMsgType[CreateActor]
    worker.reply(CreateActorFailed("", new Exception))
    worker.expectMsgType[ShutdownExecutor]
    assert(client.receiveN(1).head.asInstanceOf[SubmitApplicationResult].appId.isFailure)
    watcher.expectTerminated(appMasterLauncher)
  }
} 
Example 184
Source File: SocketConfig.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import com.typesafe.config.Config
import play.api.libs.json.Json

case class SocketConfig (
  stdin_port: Int,
  control_port: Int,
  hb_port: Int,
  shell_port: Int,
  iopub_port: Int,
  ip : String,
  transport: String,
  signature_scheme: String,
  key: String
)

object SocketConfig {
  implicit val socketConfigReads = Json.reads[SocketConfig]
  implicit val socketConfigWrites = Json.writes[SocketConfig]

  def fromConfig(config: Config) = {
    new SocketConfig(
      config.getInt("stdin_port"),
      config.getInt("control_port"),
      config.getInt("hb_port"),
      config.getInt("shell_port"),
      config.getInt("iopub_port"),
      config.getString("ip"),
      config.getString("transport"),
      config.getString("signature_scheme"),
      config.getString("key")
    )
  }
} 
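
SocketConfig mirrors the fields of a Jupyter connection profile, so a kernel profile can be parsed straight into it. A short sketch with placeholder ports:

import com.typesafe.config.ConfigFactory

val profileJson =
  """{
    |  "stdin_port": 48691, "control_port": 40544, "hb_port": 43462,
    |  "shell_port": 44808, "iopub_port": 49691,
    |  "ip": "127.0.0.1", "transport": "tcp",
    |  "signature_scheme": "hmac-sha256", "key": ""
    |}""".stripMargin

val socketConfig = SocketConfig.fromConfig(ConfigFactory.parseString(profileJson))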
Example 185
Source File: ClientBootstrap.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.boot

import akka.actor.ActorSystem
import org.apache.toree.comm.{CommRegistrar, CommStorage}
import org.apache.toree.kernel.protocol.v5.client.boot.layers._
import org.apache.toree.kernel.protocol.v5.client.socket.{SocketConfig, SocketFactory}
import org.apache.toree.kernel.protocol.v5.client.{SimpleActorLoader, SparkKernelClient}
import org.apache.toree.utils.LogLike
import com.typesafe.config.Config
import org.zeromq.ZMQ

object ClientBootstrap {
  
  def createClient(
    actorSystemName: String = ClientBootstrap.newActorSystemName()
  ): SparkKernelClient = {
    logger.trace(s"Creating new kernel client actor system, '$actorSystemName'")
    val actorSystem = ActorSystem(actorSystemName)

    logger.trace(s"Creating actor loader for actor system, '$actorSystemName'")
    val actorLoader = SimpleActorLoader(actorSystem)

    logger.trace(s"Creating socket factory for actor system, '$actorSystemName'")
    val socketFactory = new SocketFactory(SocketConfig.fromConfig(config))

    logger.trace(s"Initializing underlying system for, '$actorSystemName'")
    val (_, _, _, _, commRegistrar, _) =
      initializeSystem(config, actorSystem, actorLoader, socketFactory)

    logger.trace(s"Initializing handlers for, '$actorSystemName'")
    initializeHandlers(actorSystem, actorLoader)

    logger.trace(s"ZeroMQ version: ${ZMQ.getVersionString}")

    new SparkKernelClient(actorLoader, actorSystem, commRegistrar)
  }
} 
Example 186
Source File: AddJar.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.magic.builtin

import java.io.{File, PrintStream}
import java.net.{URL, URI}
import java.nio.file.{Files, Paths}
import java.util.zip.ZipFile
import org.apache.toree.magic._
import org.apache.toree.magic.builtin.AddJar._
import org.apache.toree.magic.dependencies._
import org.apache.toree.utils.{ArgumentParsingSupport, DownloadSupport, LogLike, FileUtils}
import com.typesafe.config.Config
import org.apache.hadoop.fs.Path
import org.apache.toree.plugins.annotations.Event

object AddJar {
  val HADOOP_FS_SCHEMES = Set("hdfs", "s3", "s3n", "file")

  private var jarDir:Option[String] = None

  def getJarDir(config: Config): String = {
    jarDir.getOrElse({
      jarDir = Some(
        if(config.hasPath("jar_dir") && Files.exists(Paths.get(config.getString("jar_dir")))) {
          config.getString("jar_dir")
        } else {
          FileUtils.createManagedTempDirectory("toree_add_jars").getAbsolutePath
        }
      )
      jarDir.get
    })
  }
}

class AddJar
  extends LineMagic with IncludeInterpreter
  with IncludeOutputStream with DownloadSupport with ArgumentParsingSupport
  with IncludeKernel with IncludePluginManager with IncludeConfig with LogLike
{
  // Option to mark re-downloading of jars
  private val _force =
    parser.accepts("f", "forces re-download of specified jar")

  // Option to mark re-downloading of jars
  private val _magic =
    parser.accepts("magic", "loads jar as a magic extension")

  // Lazy because the outputStream is not provided at construction
  private def printStream = new PrintStream(outputStream)

  )
      } else {
        downloadFile(
          new URL(jarRemoteLocation),
          new File(downloadLocation).toURI.toURL
        )
      }

      // Report download finished
      printStream.println(s"Finished download of $jarName")
    } else {
      printStream.println(s"Using cached version of $jarName")
    }

    // validate jar file
    if(! isValidJar(fileDownloadLocation)) {
      throw new IllegalArgumentException(s"Jar '$jarName' is not valid.")
    }

    if (_magic) {
      val plugins = pluginManager.loadPlugins(fileDownloadLocation)
      pluginManager.initializePlugins(plugins)
    } else {
      kernel.addJars(fileDownloadLocation.toURI)
    }
  }
} 
Example 187
Source File: SocketConfig.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.kernel.socket

import com.typesafe.config.Config
import play.api.libs.json.Json

case class SocketConfig (
  stdin_port: Int,
  control_port: Int,
  hb_port: Int,
  shell_port: Int,
  iopub_port: Int,
  ip : String,
  transport: String,
  signature_scheme: String,
  key: String
)

object SocketConfig {
  implicit val socketConfigReads = Json.reads[SocketConfig]
  implicit val socketConfigWrites = Json.writes[SocketConfig]

  def fromConfig(config: Config) = {
    new SocketConfig(
      config.getInt("stdin_port"),
      config.getInt("control_port"),
      config.getInt("hb_port"),
      config.getInt("shell_port"),
      config.getInt("iopub_port"),
      config.getString("ip"),
      config.getString("transport"),
      config.getString("signature_scheme"),
      config.getString("key")
    )
  }
} 
Example 188
Source File: FactoryMethods.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.api

import java.io.{InputStream, OutputStream}

import org.apache.toree.kernel.protocol.v5
import org.apache.toree.kernel.protocol.v5.{KMBuilder, KernelMessage}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.stream.{KernelOutputStream, KernelInputStream}
import com.typesafe.config.Config


  override def newKernelOutputStream(
    streamType: String = KernelOutputStream.DefaultStreamType,
    sendEmptyOutput: Boolean = config.getBoolean("send_empty_output")
  ): OutputStream = {
    new v5.stream.KernelOutputStream(
      actorLoader,
      kmBuilder,
      org.apache.toree.global.ScheduledTaskManager.instance,
      streamType = streamType,
      sendEmptyOutput = sendEmptyOutput
    )
  }
} 
Example 189
Source File: InterpreterManager.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.boot.layer

import org.apache.toree.kernel.api.KernelLike
import com.typesafe.config.Config
import org.apache.toree.interpreter._
import scala.collection.JavaConverters._

import org.slf4j.LoggerFactory

case class InterpreterManager(
  default: String = "Scala",
  interpreters: Map[String, Interpreter] = Map[String, Interpreter]()
) {


  def initializeInterpreters(kernel: KernelLike): Unit = {
    interpreters.values.foreach(interpreter =>
      interpreter.init(kernel)
    )
  }

  def addInterpreter(
    name:String,
    interpreter: Interpreter
  ): InterpreterManager = {
    copy(interpreters = interpreters + (name -> interpreter))
  }

  def defaultInterpreter: Option[Interpreter] = {
    interpreters.get(default)
  }
}

object InterpreterManager {

  protected val logger = LoggerFactory.getLogger(this.getClass.getName)

  def apply(config: Config): InterpreterManager = {
    val ip = config.getStringList("interpreter_plugins").asScala ++
      config.getStringList("default_interpreter_plugin").asScala

    val m = ip.foldLeft(Map[String, Interpreter]())( (acc, v) => {

      v.split(":") match {
        case Array(name, className) =>
          try {
            val i = instantiate(className, config)
            acc + (name -> i)
          }
          catch {
            case e:Throwable =>
              logger.error("Error loading interpreter class " + className)
              logger.error(e.getMessage())
              //acc
              throw e
          }
        case _ => acc
      }
    })

    val default = config.getString("default_interpreter")

    InterpreterManager(interpreters = m, default = default)
  }

  
  private def instantiate(className:String, config:Config):Interpreter = {
    try {
      Class
        .forName(className)
        .getConstructor(Class.forName("com.typesafe.config.Config"))
        .newInstance(config).asInstanceOf[Interpreter]
    }
    catch {
      case e: NoSuchMethodException =>
        logger.debug("Using default constructor for class " + className)
        Class
          .forName(className)
          .newInstance().asInstanceOf[Interpreter]
    }

  }

} 
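
InterpreterManager.apply expects plugin entries in "name:className" form together with the name of the default interpreter. A hedged configuration sketch (the interpreter class names are placeholders):

import com.typesafe.config.ConfigFactory

val config = ConfigFactory.parseString(
  """interpreter_plugins = ["PySpark:com.example.PySparkInterpreter"]
    |default_interpreter_plugin = ["Scala:com.example.ScalaInterpreter"]
    |default_interpreter = "Scala"
    |""".stripMargin)

// Each class is instantiated reflectively, preferring a constructor that takes a Config.
val manager = InterpreterManager(config)
manager.defaultInterpreter   // Some(interpreter registered under "Scala"), if loading succeeded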
Example 190
Source File: SparkKernelClientDeployer.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package test.utils.root

import org.apache.toree.kernel.protocol.v5.client.boot.ClientBootstrap
import org.apache.toree.kernel.protocol.v5.client.boot.layers._
import com.typesafe.config.{ConfigFactory, Config}


object SparkKernelClientDeployer {
   lazy val startInstance = {
     val profileJSON: String = """
     {
         "stdin_port":   48691,
         "control_port": 40544,
         "hb_port":      43462,
         "shell_port":   44808,
         "iopub_port":   49691,
         "ip": "127.0.0.1",
         "transport": "tcp",
         "signature_scheme": "hmac-sha256",
         "key": ""
     }
                               """.stripMargin
     val config: Config = ConfigFactory.parseString(profileJSON)
     (new ClientBootstrap(config)
       with StandardSystemInitialization
       with StandardHandlerInitialization).createClient()
   }
 } 
Example 191
Source File: OrderServiceApp.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.integration

import scala.concurrent.Future

import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory } 

object OrderServiceApp extends App
    with RequestTimeout {
  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher

  val processOrders = system.actorOf(
    Props(new ProcessOrders), "process-orders"
  )

  val api = new OrderServiceApi(system, 
    requestTimeout(config), 
    processOrders).routes
 
  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "order-service")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.failed.foreach { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}


trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
} 
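
RequestTimeout converts the akka.http.server.request-timeout setting into an akka Timeout by going through scala.concurrent.duration.Duration. A minimal sketch of the conversion, assuming RequestTimeout is mixed in and the setting is a literal value:

import com.typesafe.config.ConfigFactory

val config = ConfigFactory.parseString("akka.http.server.request-timeout = 20s")
val timeout = requestTimeout(config)   // Timeout of 20 seconds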
Example 192
Source File: ShoppersServiceSupport.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.persistence.rest

import com.typesafe.config.Config

import scala.concurrent.Future

import akka.actor._
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.stream.ActorMaterializer

import aia.persistence._

trait ShoppersServiceSupport extends RequestTimeout {
  def startService(shoppers: ActorRef)(implicit system: ActorSystem) = {
    val config = system.settings.config
    val settings = Settings(system)
    val host = settings.http.host
    val port = settings.http.port

    implicit val ec = system.dispatcher  //bindAndHandle requires an implicit ExecutionContext

    val api = new ShoppersService(shoppers, system, requestTimeout(config)).routes // the RestApi provides a Route
 
    implicit val materializer = ActorMaterializer()
    val bindingFuture: Future[ServerBinding] =
      Http().bindAndHandle(api, host, port)
   
    val log =  Logging(system.eventStream, "shoppers")
    bindingFuture.map { serverBinding =>
      log.info(s"Shoppers API bound to ${serverBinding.localAddress} ")
    }.failed.foreach { 
      case ex: Exception =>
        log.error(ex, "Failed to bind to {}:{}!", host, port)
        system.terminate()
    }
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
} 
Example 193
Source File: MonitorMailbox.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.performance.monitor

import akka.actor.{ ActorRef, ActorSystem }
import akka.dispatch._
import scala.Some
import java.util.Queue
import com.typesafe.config.Config
import java.util.concurrent.ConcurrentLinkedQueue
import akka.dispatch.{ MailboxType, MessageQueue, UnboundedMessageQueueSemantics }
import akka.event.LoggerMessageQueueSemantics

case class MonitorEnvelope(queueSize: Int,
                           receiver: String,
                           entryTime: Long,
                           handle: Envelope)

case class MailboxStatistics(queueSize: Int,
                             receiver: String,
                             sender: String,
                             entryTime: Long,
                             exitTime: Long)




class MonitorQueue(val system: ActorSystem)
    extends MessageQueue
    with UnboundedMessageQueueSemantics
    with LoggerMessageQueueSemantics {
  private final val queue = new ConcurrentLinkedQueue[MonitorEnvelope]()



  def numberOfMessages = queue.size
  def hasMessages = !queue.isEmpty

  def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    if (hasMessages) {
      var envelope = dequeue
      while (envelope ne null) {
        deadLetters.enqueue(owner, envelope)
        envelope = dequeue
      }
    }
  }



  def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
    val env = MonitorEnvelope(queueSize = queue.size() + 1,
      receiver = receiver.toString(),
      entryTime = System.currentTimeMillis(),
      handle = handle)
    queue add env
  }



  def dequeue(): Envelope = {
    val monitor = queue.poll()
    if (monitor != null) {
      monitor.handle.message match {
        case stat: MailboxStatistics => // skip MailboxStatistics to avoid publishing stats about stats messages
        case _ => {
          val stat = MailboxStatistics(
            queueSize = monitor.queueSize,
            receiver = monitor.receiver,
            sender = monitor.handle.sender.toString(),
            entryTime = monitor.entryTime,
            exitTime = System.currentTimeMillis())
          system.eventStream.publish(stat)
        }
      }
      monitor.handle
    } else {
      null
    }
  }


}



class MonitorMailboxType(settings: ActorSystem.Settings, config: Config)
    extends MailboxType 
    with ProducesMessageQueue[MonitorQueue]{

  final override def create(owner: Option[ActorRef],
                            system: Option[ActorSystem]): MessageQueue = {
    system match {
      case Some(sys) =>
        new MonitorQueue(sys)
      case _ =>
        throw new IllegalArgumentException("requires a system")
    }
  }
} 
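
MonitorMailboxType is instantiated by Akka itself: the (settings, config) constructor is the contract Akka uses for custom MailboxType implementations, so the class is enabled purely through configuration. A hedged wiring sketch that makes it the default mailbox for a system:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

// Hedged sketch: every actor mailbox in this system now publishes MailboxStatistics
// to the event stream via MonitorQueue.
val conf = ConfigFactory.parseString(
  """akka.actor.default-mailbox {
    |  mailbox-type = "aia.performance.monitor.MonitorMailboxType"
    |}""".stripMargin)

val system = ActorSystem("monitored", conf.withFallback(ConfigFactory.load()))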
Example 194
Source File: Main.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory }

object Main extends App
    with RequestTimeout {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host") // read the host and port from configuration
  val port = config.getInt("http.port")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher  // bindAndHandle is asynchronous and needs an implicit ExecutionContext

  val api = new RestApi(system, requestTimeout(config)).routes // RestApi provides the HTTP routes
 
  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port) // start the HTTP server with the RestApi routes
 
  val log =  Logging(system.eventStream, "go-ticks")
  bindingFuture.map { serverBinding =>
    log.info(s"RestApi bound to ${serverBinding.localAddress} ")
  }.onComplete { 
    case Success(v) =>
    case Failure(ex) =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
} 
Example 195
Source File: ResumingEventFilter.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object ResumingEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load() 
  val maxLine = config.getInt("log-stream-processor.max-line")
  
  if(args.length != 3) {
    System.err.println("Provide args: input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(0))
  val outputFile = FileArg.shellExpanded(args(1))

  val filterState = args(2) match {
    case State(state) => state
    case unknown => 
      System.err.println(s"Unknown state $unknown, exiting.") 
      System.exit(1)
  }
  import akka.stream.scaladsl._

  val source: Source[ByteString, Future[IOResult]] = 
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] = 
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val frame: Flow[ByteString, String, NotUsed] =  
    Framing.delimiter(ByteString("\n"), maxLine) 
      .map(_.decodeString("UTF8"))


  import akka.stream.ActorAttributes
  import akka.stream.Supervision

  import LogStreamProcessor.LogParseException

  val decider : Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  val parse: Flow[String, Event, NotUsed] = 
    Flow[String].map(LogStreamProcessor.parseLineEx) 
      .collect { case Some(e) => e }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))


  val filter: Flow[Event, Event, NotUsed] =   
    Flow[Event].filter(_.state == filterState)
  
  val serialize: Flow[Event, ByteString, NotUsed] =  
    Flow[Event].map(event => ByteString(event.toJson.compactPrint))

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher


  val graphDecider : Supervision.Decider = { 
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  import akka.stream.ActorMaterializerSettings
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(graphDecider)
  )



  val composedFlow: Flow[ByteString, ByteString, NotUsed] =  
    frame.via(parse)
      .via(filter)
      .via(serialize)

  val runnableGraph: RunnableGraph[Future[IOResult]] = 
    source.via(composedFlow).toMat(sink)(Keep.right)

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }  

} 
Example 196
Source File: BidiEventFilter.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._


import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl._
import akka.stream.scaladsl.JsonFraming
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object BidiEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load() 
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsonObject = config.getInt("log-stream-processor.max-json-object")

  if(args.length != 5) {
    System.err.println("Provide args: input-format output-format input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(2))
  val outputFile = FileArg.shellExpanded(args(3))
  val filterState = args(4) match {
    case State(state) => state
    case unknown => 
      System.err.println(s"Unknown state $unknown, exiting.") 
      System.exit(1)
  }


  val inFlow: Flow[ByteString, Event, NotUsed] = 
    if(args(0).toLowerCase == "json") {
      JsonFraming.objectScanner(maxJsonObject)
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
    } else {
      Framing.delimiter(ByteString("\n"), maxLine)
        .map(_.decodeString("UTF8"))
        .map(LogStreamProcessor.parseLineEx)
        .collect { case Some(event) => event }
    }

  val outFlow: Flow[Event, ByteString, NotUsed] = 
    if(args(1).toLowerCase == "json") {
      Flow[Event].map(event => ByteString(event.toJson.compactPrint))
    } else {
      Flow[Event].map{ event => 
        ByteString(LogStreamProcessor.logLine(event))
      }
    }
  val bidiFlow = BidiFlow.fromFlows(inFlow, outFlow)

    
  val source: Source[ByteString, Future[IOResult]] = 
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] = 
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))
  

  val filter: Flow[Event, Event, NotUsed] =   
    Flow[Event].filter(_.state == filterState)

  val flow = bidiFlow.join(filter)


  val runnableGraph: RunnableGraph[Future[IOResult]] = 
    source.via(flow).toMat(sink)(Keep.right)

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }  
} 
Example 197
Source File: FanLogsApp.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object FanLogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new FanLogsApi(logsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "fan-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 198
Source File: LogsApp.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object LogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                    => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new LogsApi(logsDir, maxLine).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 199
Source File: LogStreamProcessorApp.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object LogStreamProcessorApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val notificationsDir = {
    val dir = config.getString("log-stream-processor.notifications-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val metricsDir = {
    val dir = config.getString("log-stream-processor.metrics-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new LogStreamProcessorApi(logsDir, notificationsDir, metricsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "processor")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 200
Source File: ContentNegLogsApp.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object ContentNegLogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                    => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new ContentNegLogsApi(logsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "content-neg-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}