org.json4s.DefaultFormats Scala Examples

The following examples show how to use org.json4s.DefaultFormats. Each example is taken from an open-source project; the source file, project, and license are noted in the header above it.
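As a quick orientation, here is a minimal, self-contained sketch of the pattern the examples below share: bring DefaultFormats into implicit scope, parse a JSON string, and extract typed values with extract[T]. The Person case class, the object name, and the choice of the json4s-jackson parser are illustrative assumptions, not taken from any of the projects below.

import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

// Illustrative case class; any type with a matching constructor works.
case class Person(name: String, age: Int)

object DefaultFormatsQuickStart extends App {
  // DefaultFormats supplies the default date format and extraction behaviour;
  // json4s looks it up implicitly whenever extract[T] is called.
  implicit val formats: DefaultFormats.type = DefaultFormats

  val json = parse("""{"name":"Ada","age":36}""")

  // Extract a whole case class, or a single field via the \ query operator.
  val person = json.extract[Person]        // Person(Ada,36)
  val age    = (json \ "age").extract[Int] // 36

  println(person)
  println(age)
}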
Example 1
Source File: RWrappers.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReader


private[r] object RWrappers extends MLReader[Object] {

  override def load(path: String): Object = {
    implicit val format = DefaultFormats
    val rMetadataPath = new Path(path, "rMetadata").toString
    val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
    val rMetadata = parse(rMetadataStr)
    val className = (rMetadata \ "class").extract[String]
    className match {
      case "org.apache.spark.ml.r.NaiveBayesWrapper" => NaiveBayesWrapper.load(path)
      case "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper" =>
        AFTSurvivalRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper" =>
        GeneralizedLinearRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.KMeansWrapper" =>
        KMeansWrapper.load(path)
      case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" =>
        MultilayerPerceptronClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.LDAWrapper" =>
        LDAWrapper.load(path)
      case "org.apache.spark.ml.r.IsotonicRegressionWrapper" =>
        IsotonicRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GaussianMixtureWrapper" =>
        GaussianMixtureWrapper.load(path)
      case "org.apache.spark.ml.r.ALSWrapper" =>
        ALSWrapper.load(path)
      case "org.apache.spark.ml.r.LogisticRegressionWrapper" =>
        LogisticRegressionWrapper.load(path)
      case _ =>
        throw new SparkException(s"SparkR read.ml does not support load $className")
    }
  }
} 
Example 2
Source File: HttpUtil.scala    From sparta   with Apache License 2.0
package com.stratio.benchmark.generator.utils

import org.apache.http.HttpStatus
import org.apache.http.client.methods.{HttpDelete, HttpGet, HttpPost, HttpPut}
import org.apache.http.entity.StringEntity
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.http.util.EntityUtils
import org.apache.log4j.Logger
import org.json4s.DefaultFormats
import org.json4s.native.JsonMethods._

import scala.io.Source

trait HttpUtil {

  private val logger = Logger.getLogger(this.getClass)

  
  def createPolicy(policyContent: String, endpoint: String)(implicit defaultFormats: DefaultFormats): String = {

    val policyName = (parse(policyContent) \ "name").extract[String]

    // If the policy already exists when the benchmark launches, stop and delete it first.
    getPolicyId(policyName, endpoint) match {
      case Some(id) =>
        stopPolicy(id, endpoint)
        deletePolicy(id, endpoint)
      case None => logger.debug(s"No policy with name $policyName exists in Sparta yet.")
    }

    val client = HttpClientBuilder.create().build()
    val post = new HttpPost(s"$endpoint/policyContext")
    post.setHeader("Content-type", "application/json")
    post.setEntity(new StringEntity(policyContent))
    val response = client.execute(post)

    if(response.getStatusLine.getStatusCode != HttpStatus.SC_OK)
      throw new IllegalStateException(s"Sparta status code is not OK: ${response.getStatusLine.getStatusCode}")
    else {
      val entity = response.getEntity
      val policyId = (parse(EntityUtils.toString(entity)) \ "policyId").extract[String]
      policyId
    }
  }

  def getPolicyId(name: String, endpoint: String)(implicit defaultFormats: DefaultFormats): Option[String] = {
    val client = HttpClientBuilder.create().build()
    val get = new HttpGet(s"$endpoint/policy/findByName/$name")

    val response = client.execute(get)

    response.getStatusLine.getStatusCode match {
      case HttpStatus.SC_OK =>
        Option((parse(EntityUtils.toString(response.getEntity)) \ "id").extract[String])
      case _ => None
    }
  }

  def stopPolicy(id: String, endpoint: String): Unit = {
    val client = HttpClientBuilder.create().build()
    val put = new HttpPut(s"$endpoint/policyContext")
    put.setHeader("Content-Type", "application/json")
    val entity = new StringEntity(s"""{"id":"$id", "status":"Stopping"}""")
    put.setEntity(entity)
    val response = client.execute(put)

    if(response.getStatusLine.getStatusCode != HttpStatus.SC_CREATED) {
      logger.info(Source.fromInputStream(response.getEntity.getContent).mkString(""))
      logger.info(s"Sparta status code is not OK: ${response.getStatusLine.getStatusCode}")
    }
  }

  def deletePolicy(id: String, endpoint: String): Unit = {
    val client = HttpClientBuilder.create().build()
    val delete = new HttpDelete(s"$endpoint/policy/$id")
    val response = client.execute(delete)

    if(response.getStatusLine.getStatusCode != HttpStatus.SC_OK)
      logger.info(s"Sparta status code is not OK: ${response.getStatusLine.getStatusCode}")
  }
} 
Example 3
Source File: JsonVectorConverter.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.ml.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

private[ml] object JsonVectorConverter {

  
  def toJson(v: Vector): String = {
    v match {
      case SparseVector(size, indices, values) =>
        val jValue = ("type" -> 0) ~
          ("size" -> size) ~
          ("indices" -> indices.toSeq) ~
          ("values" -> values.toSeq)
        compact(render(jValue))
      case DenseVector(values) =>
        val jValue = ("type" -> 1) ~ ("values" -> values.toSeq)
        compact(render(jValue))
    }
  }
} 
Example 4
Source File: JsonRpcServer.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.jsonrpc.server

import java.security.SecureRandom

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.HttpOriginRange
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{MalformedRequestContentRejection, RejectionHandler, Route}
import ch.megard.akka.http.cors.javadsl.CorsRejection
import ch.megard.akka.http.cors.scaladsl.CorsDirectives._
import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings
import de.heikoseeberger.akkahttpjson4s.Json4sSupport
import io.iohk.ethereum.jsonrpc.{JsonRpcController, JsonRpcErrors, JsonRpcRequest, JsonRpcResponse}
import io.iohk.ethereum.utils.Logger
import org.json4s.JsonAST.JInt
import org.json4s.{DefaultFormats, native}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

trait JsonRpcServer extends Json4sSupport {
  val jsonRpcController: JsonRpcController

  implicit val serialization = native.Serialization

  implicit val formats = DefaultFormats

  def corsAllowedOrigins: HttpOriginRange

  val corsSettings = CorsSettings.defaultSettings.copy(
    allowGenericHttpRequests = true,
    allowedOrigins = corsAllowedOrigins
  )

  implicit def myRejectionHandler: RejectionHandler =
    RejectionHandler.newBuilder()
      .handle {
        case _: MalformedRequestContentRejection =>
          complete((StatusCodes.BadRequest, JsonRpcResponse("2.0", None, Some(JsonRpcErrors.ParseError), JInt(0))))
        case _: CorsRejection =>
          complete(StatusCodes.Forbidden)
      }
      .result()

  val route: Route = cors(corsSettings) {
    (pathEndOrSingleSlash & post) {
      entity(as[JsonRpcRequest]) { request =>
        handleRequest(request)
      } ~ entity(as[Seq[JsonRpcRequest]]) { request =>
        handleBatchRequest(request)
      }
    }
  }

  
  def run(): Unit

  private def handleRequest(request: JsonRpcRequest) = {
    complete(jsonRpcController.handleRequest(request))
  }

  private def handleBatchRequest(requests: Seq[JsonRpcRequest]) = {
    complete(Future.sequence(requests.map(request => jsonRpcController.handleRequest(request))))
  }
}

object JsonRpcServer extends Logger {

  def apply(jsonRpcController: JsonRpcController, config: JsonRpcServerConfig, secureRandom: SecureRandom)
           (implicit actorSystem: ActorSystem): Either[String, JsonRpcServer] = config.mode match {
    case "http" => Right(new JsonRpcHttpServer(jsonRpcController, config)(actorSystem))
    case "https" => Right(new JsonRpcHttpsServer(jsonRpcController, config, secureRandom)(actorSystem))
    case _ => Left(s"Cannot start JSON RPC server: Invalid mode ${config.mode} selected")
  }

  trait JsonRpcServerConfig {
    val mode: String
    val enabled: Boolean
    val interface: String
    val port: Int
    val certificateKeyStorePath: Option[String]
    val certificateKeyStoreType: Option[String]
    val certificatePasswordFile: Option[String]
    val corsAllowedOrigins: HttpOriginRange
  }


} 
Example 5
Source File: EncryptedKeyJsonCodec.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.keystore

import java.util.UUID

import akka.util.ByteString
import io.iohk.ethereum.domain.Address
import io.iohk.ethereum.keystore.EncryptedKey._
import org.json4s.JsonAST.{JObject, JString, JValue}
import org.json4s.JsonDSL._
import org.json4s.native.JsonMethods._
import org.json4s.{CustomSerializer, DefaultFormats, Extraction, JField}
import org.spongycastle.util.encoders.Hex

import scala.util.Try

object EncryptedKeyJsonCodec {

  private val byteStringSerializer = new CustomSerializer[ByteString](_ => (
    { case JString(s) => ByteString(Hex.decode(s)) },
    { case bs: ByteString => JString(Hex.toHexString(bs.toArray)) }
  ))

  private implicit val formats = DefaultFormats + byteStringSerializer

  private def asHex(bs: ByteString): String =
    Hex.toHexString(bs.toArray)

  def toJson(encKey: EncryptedKey): String = {
    import encKey._
    import cryptoSpec._

    val json =
      ("id" -> id.toString) ~
      ("address" -> asHex(address.bytes)) ~
      ("version" -> version) ~
      ("crypto" -> (
        ("cipher" -> cipher) ~
        ("ciphertext" -> asHex(ciphertext)) ~
        ("cipherparams" -> ("iv" -> asHex(iv))) ~
        encodeKdf(kdfParams) ~
        ("mac" -> asHex(mac))
      ))

    pretty(render(json))
  }

  def fromJson(jsonStr: String): Either[String, EncryptedKey] = Try {
    val json = parse(jsonStr).transformField { case JField(k, v) => JField(k.toLowerCase, v) }

    val uuid = UUID.fromString((json \ "id").extract[String])
    val address = Address((json \ "address").extract[String])
    val version = (json \ "version").extract[Int]

    val crypto = json \ "crypto"
    val cipher = (crypto \ "cipher").extract[String]
    val ciphertext = (crypto \ "ciphertext").extract[ByteString]
    val iv = (crypto \ "cipherparams" \ "iv").extract[ByteString]
    val mac = (crypto \ "mac").extract[ByteString]

    val kdfParams = extractKdf(crypto)
    val cryptoSpec = CryptoSpec(cipher, ciphertext, iv, kdfParams, mac)
    EncryptedKey(uuid, address, cryptoSpec, version)

  }.fold(ex => Left(ex.toString), encKey => Right(encKey))

  private def encodeKdf(kdfParams: KdfParams): JObject =
    kdfParams match {
      case ScryptParams(salt, n, r, p, dklen) =>
        ("kdf" -> Scrypt) ~
        ("kdfparams" -> Extraction.decompose(kdfParams))

      case Pbkdf2Params(salt, prf, c, dklen) =>
        ("kdf" -> Pbkdf2) ~
        ("kdfparams" -> Extraction.decompose(kdfParams))
    }

  private def extractKdf(crypto: JValue): KdfParams = {
    val kdf = (crypto \ "kdf").extract[String]
    kdf.toLowerCase match {
      case Scrypt =>
        (crypto \ "kdfparams").extract[ScryptParams]

      case Pbkdf2 =>
        (crypto \ "kdfparams").extract[Pbkdf2Params]
    }
  }

} 
Example 6
Source File: JsonSerializer.scala    From akka-serialization-test   with Apache License 2.0
package com.github.dnvriend.serializer.json

import akka.serialization.Serializer
import com.github.dnvriend.domain.OrderDomain
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization
import org.json4s.native.Serialization._
import org.json4s.{ DefaultFormats, Formats, NoTypeHints }

case class EventWrapper(manifest: String, payload: String)

class JsonSerializer extends Serializer {

  implicit val formats: Formats = DefaultFormats + OrderDomain.DirectDebitTypeSerializer

  override def identifier: Int = Int.MaxValue

  override def includeManifest: Boolean = true

  override def toBinary(o: AnyRef): Array[Byte] =
    write(EventWrapper(o.getClass.getName, write(o))).getBytes()

  override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = {
    val wrapper: EventWrapper = parse(new String(bytes)).extract[EventWrapper]
    implicit val mf = Manifest.classType(Class.forName(wrapper.manifest))
    read(wrapper.payload)
  }
} 
Example 7
Source File: FuturesStacking.scala    From web3scala   with Apache License 2.0
import dispatch._
import Defaults._
import org.json4s.DefaultFormats
import org.web3scala.Service
import org.web3scala.model.{Block, BlockName, BlockNumber}
import org.web3scala.util.Utils

object FuturesStacking extends App {

  val service = new Service

  implicit val formats: DefaultFormats.type = DefaultFormats

  
  def highestBalance(requestParams: (String, Block)*) = {

    // execute async requests
    val responses =
      for (requestParam <- requestParams)
        yield requestParam._1 -> service.asyncEthGetBalance(requestParam._1, requestParam._2)

    // parse responses
    val futures =
      for (response <- responses)
        yield for (json <- response._2.future)
          yield response._1 -> Utils.hex2long((json \ "result").extract[String])

    // select max balance and return corresponding address
    for (future <- Future.sequence(futures))
      yield future.maxBy(_._2)._1
  }

  val rq1 = ("0x1f2e3994505ea24642d94d00a4bcf0159ed1a617", BlockName("latest"))
  val rq2 = ("0xf9C510e90bCb47cc49549e57b80814aE3A8bb683", BlockName("pending"))
  val rq3 = ("0x902c4fD71e196E86e7C82126Ff88ADa63a590d22", BlockNumber(1559297))

  val result = highestBalance(rq1, rq2, rq3)

  println("Highest Balance: " + result())

  System.exit(0)
} 
Example 8
Source File: ModuleResourceInfo.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.domain

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.resourcemanager.{Resource, ResourceSerializer}
import org.json4s.JsonAST.JObject
import org.json4s.JsonDSL._
import org.json4s.{CustomSerializer, DefaultFormats, Extraction}


case class ModuleResourceInfo(moduleInstance: ServiceInstance, totalResource: Resource, usedResource: Resource)

object ModuleResourceInfoSerializer extends CustomSerializer[ModuleResourceInfo](implicit formats => ( {
  case JObject(List(("ModuleResourceInfo", JObject(List(("moduleInstance", moduleInstance), ("totalResource", totalResource), ("usedResource", usedResource)))))) =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    ModuleResourceInfo(moduleInstance.extract[ServiceInstance], totalResource.extract[Resource], usedResource.extract[Resource])
}, {
  case m: ModuleResourceInfo =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    ("ModuleResourceInfo", new JObject(List(("moduleInstance", Extraction.decompose(m.moduleInstance)), ("totalResource", Extraction.decompose(m.totalResource)), ("usedResource", Extraction.decompose(m.usedResource)))))
}))

case class ModuleResourceRecord(moduleInfo: ModuleInfo, moduleUsedResources: Resource, moduleLeftResource: Resource, moduleLockedResource: Resource, registerTime: Long = System.currentTimeMillis()) 
Example 9
Source File: ModuleInfo.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.domain

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.resourcemanager.ResourceRequestPolicy.ResourceRequestPolicy
import com.webank.wedatasphere.linkis.resourcemanager.{Resource, ResourceRequestPolicy, ResourceSerializer}
import org.json4s.JsonAST.JObject
import org.json4s.{CustomSerializer, DefaultFormats, Extraction}


case class ModuleInfo(moduleInstance: ServiceInstance,
                      totalResource: Resource,
                      protectedResource: Resource, // enter protection mode when resource usage reaches this amount (当资源达到多少时,进入保护模式)
                      resourceRequestPolicy: ResourceRequestPolicy
                     )

object ModuleInfoSerializer extends CustomSerializer[ModuleInfo](implicit formats => ( {
  case JObject(List(("moduleInstance", moduleInstance), ("totalResource", totalResource), ("protectedResource", protectedResource), ("resourceRequestPolicy", resourceRequestPolicy))) =>
    implicit val formats = DefaultFormats + ResourceSerializer + ModuleInstanceSerializer
    new ModuleInfo(moduleInstance.extract[ServiceInstance], totalResource.extract[Resource], protectedResource.extract[Resource], ResourceRequestPolicy.withName(resourceRequestPolicy.extract[String]))
}, {
  case i: ModuleInfo =>
    implicit val formats = DefaultFormats + ResourceSerializer + ModuleInstanceSerializer
    val policy = Extraction.decompose(i.resourceRequestPolicy.toString)
    new JObject(List(("moduleInstance", Extraction.decompose(i.moduleInstance)), ("totalResource", Extraction.decompose(i.totalResource)), ("protectedResource", Extraction.decompose(i.protectedResource)), ("resourceRequestPolicy", policy)))
})
) 
Example 10
Source File: UserResourceInfo.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.domain

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.resourcemanager.{Resource, ResourceSerializer}
import org.json4s.JsonAST.JObject
import org.json4s.JsonDSL._
import org.json4s.{CustomSerializer, DefaultFormats, Extraction}



trait UserResourceInfo

case class UserPreUsedResource(ticketId: String, moduleInstance: ServiceInstance, resource: Resource) extends UserResourceInfo

case class UserUsedResource(ticketId: String, moduleInstance: ServiceInstance, resource: Resource, engineInstance: ServiceInstance = null) extends UserResourceInfo

case class UserReleasedResource(ticketId: String, moduleInstance: ServiceInstance) extends UserResourceInfo


object UserResourceInfoSerializer extends CustomSerializer[UserResourceInfo](implicit formats => ( {
  case JObject(List(("UserPreUsedResource", JObject(List(("ticketId", ticketId), ("moduleInstance", moduleInstance), ("resource", resource)))))) =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    new UserPreUsedResource(ticketId.extract[String], moduleInstance.extract[ServiceInstance], resource.extract[Resource])
  case JObject(List(("UserUsedResource", JObject(List(("ticketId", ticketId), ("moduleInstance", moduleInstance), ("resource", resource), ("engineInstance", engineInstance)))))) =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    new UserUsedResource(ticketId.extract[String], moduleInstance.extract[ServiceInstance], resource.extract[Resource], engineInstance.extract[ServiceInstance])
  case JObject(List(("UserReleasedResource", JObject(List(("ticketId", ticketId), ("moduleInstance", moduleInstance)))))) =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    new UserReleasedResource(ticketId.extract[String], moduleInstance.extract[ServiceInstance])
}, {
  case d: UserPreUsedResource =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    ("UserPreUsedResource", new JObject(List(("ticketId", Extraction.decompose(d.ticketId)), ("moduleInstance", Extraction.decompose(d.moduleInstance)), ("resource", Extraction.decompose(d.resource)))))
  case d: UserUsedResource =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    ("UserUsedResource", new JObject(List(("ticketId", Extraction.decompose(d.ticketId)), ("moduleInstance", Extraction.decompose(d.moduleInstance)), ("resource", Extraction.decompose(d.resource)), ("engineInstance", Extraction.decompose(d.engineInstance)))))
  case d: UserReleasedResource =>
    implicit val formats = DefaultFormats + ModuleInstanceSerializer + ResourceSerializer
    ("UserReleasedResource", new JObject(List(("ticketId", Extraction.decompose(d.ticketId)), ("moduleInstance", Extraction.decompose(d.moduleInstance)))))
})
) 
Example 11
Source File: RPCProduct.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.transform

import java.lang.reflect.{ParameterizedType, Type}
import java.util

import com.webank.wedatasphere.linkis.DataWorkCloudApplication
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.rpc.exception.DWCURIException
import com.webank.wedatasphere.linkis.server.{BDPJettyServerHelper, EXCEPTION_MSG, Message}
import org.apache.commons.lang.ClassUtils
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats, Serializer}

import scala.collection.JavaConversions


private[linkis] trait RPCProduct {

  def toMessage(t: Any): Message

  def notFound(): Message

  def ok(): Message

}
private[linkis] object RPCProduct extends Logging {
  private[rpc] val IS_SCALA_CLASS = "rpc_is_scala_class"
  private[rpc] val CLASS_VALUE = "rpc_object_class"
  private[rpc] val OBJECT_VALUE = "rpc_object_value"
  private[rpc] implicit var formats: Formats = DefaultFormats + JavaCollectionSerializer + JavaMapSerializer
  private var serializerClasses: List[Class[_]] = List.empty
  private val rpcProduct: RPCProduct = new RPCProduct {
    private val rpcFormats = DataWorkCloudApplication.getApplicationContext.getBeansOfType(classOf[RPCFormats])
    if(rpcFormats != null && !rpcFormats.isEmpty) {
      val serializers = JavaConversions.mapAsScalaMap(rpcFormats).map(_._2.getSerializers).toArray.flatMap(_.iterator)
      setFormats(serializers)
    }
    override def toMessage(t: Any): Message = {
      if(t == null) throw new DWCURIException(10001, "The transmitted bean is Null.(传输的bean为Null.)")
      val message = Message.ok("RPC Message.")
      if(isScalaClass(t)){
        message.data(IS_SCALA_CLASS, "true")
        message.data(OBJECT_VALUE, Serialization.write(t.asInstanceOf[AnyRef]))
      } else {
        message.data(IS_SCALA_CLASS, "false")
        message.data(OBJECT_VALUE, BDPJettyServerHelper.gson.toJson(t))
      }
      message.setMethod("/rpc/message")
      message.data(CLASS_VALUE, t.getClass.getName)
    }

    override def notFound(): Message = {
      val message = Message.error("RPC Message.")
      message.setMethod("/rpc/message")
      message.data(EXCEPTION_MSG, new DWCURIException(10000, "The service does not exist for the available Receiver.(服务不存在可用的Receiver.)").toMap)
    }

    override def ok(): Message = {
      val message = Message.ok("RPC Message.")
      message.setMethod("/rpc/message")
      message
    }
  }
  private[rpc] def setFormats(serializer: Array[Serializer[_]]): Unit ={
    this.formats = (serializer :+ JavaCollectionSerializer :+ JavaMapSerializer).foldLeft(DefaultFormats.asInstanceOf[Formats])(_ + _)
    serializerClasses = formats.customSerializers.map(s => getActualTypeClass(s.getClass.getGenericSuperclass))
      .filter(_ != null) ++: List(classOf[util.List[_]], classOf[util.Map[_, _]])
    info("RPC Serializers: " + this.formats.customSerializers.map(_.getClass.getSimpleName) + ", serializerClasses: " +
      "" + serializerClasses)
  }
  private def getActualTypeClass(classType: Type): Class[_] = classType match {
    case p: ParameterizedType =>
      val params = p.getActualTypeArguments
      if(params == null || params.isEmpty) null
      else getActualTypeClass(params(0))
    case c: Class[_] => c
    case _ => null
  }
  private[rpc] def isScalaClass(obj: Any): Boolean =
    (obj.isInstanceOf[Product] && obj.isInstanceOf[Serializable]) ||
      serializerClasses.exists(ClassUtils.isAssignable(obj.getClass, _)) ||
        obj.getClass.getName.startsWith("scala.")
  private[rpc] def getSerializableScalaClass(clazz: Class[_]): Class[_] =
    serializerClasses.find(ClassUtils.isAssignable(clazz, _)).getOrElse(clazz)
  def getRPCProduct: RPCProduct = rpcProduct
} 
Example 12
Source File: RPCFormatsTest.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc

import java.lang.reflect.ParameterizedType
import java.util

import com.webank.wedatasphere.linkis.rpc.transform.{JavaCollectionSerializer, JavaMapSerializer}
import org.apache.commons.lang.ClassUtils
import org.json4s.JsonAST.JObject
import org.json4s.JsonDSL._
import org.json4s.jackson.Serialization
import org.json4s.reflect.ManifestFactory
import org.json4s.{CustomSerializer, DefaultFormats, Extraction}

object RPCFormatsTest {

  trait ResultResource
  class AvailableResource(val ticketId: String) extends ResultResource

  object ResultResourceSerializer extends CustomSerializer[ResultResource](implicit formats => ({
    case JObject(List(("AvailableResource", JObject(List(("ticketId", ticketId)))))) => new AvailableResource(ticketId.extract[String])
  },{
    case r: AvailableResource => ("AvailableResource", ("ticketId", Extraction.decompose(r.ticketId)))
  }))

  def testRPC1(args: Array[String]): Unit = {
    implicit val formats = DefaultFormats + ResultResourceSerializer
    val serializerClasses = formats.customSerializers.map(_.getClass.getGenericSuperclass match {
      case p: ParameterizedType =>
        val params = p.getActualTypeArguments
        if(params == null || params.isEmpty) null
        else params(0).asInstanceOf[Class[_]]
    }).filter(_ != null)
    val a = new AvailableResource("aaa")
    val str = Serialization.write(a)
    println(str)
    val clazz = classOf[AvailableResource]
    println(serializerClasses)
    val realClass1 = serializerClasses.find(ClassUtils.isAssignable(clazz, _))
    println(realClass1)
    val realClass = realClass1.getOrElse(clazz)
    val obj = Serialization.read(str)(formats, ManifestFactory.manifestOf(realClass))
    println(obj)
    println(classOf[Array[_]].getClass.getName)
  }

  case class TestCollection1(a: String, list: java.util.List[String])
  case class TestCollection2(a: String, list: java.util.Map[String, Integer])

  def testRPC2(args: Array[String]): Unit = {
    implicit val formats = DefaultFormats + JavaCollectionSerializer + JavaMapSerializer
    //    val a = TestCollection1("1", new util.ArrayList[String]())
    val a = TestCollection2("1", new util.HashMap[String, Integer]())
    //    a.list.add("1111")
    a.list.put("1111", 2)
    val str = Serialization.write(a)
    println(str)
    val realClass = classOf[TestCollection2]
    val obj = Serialization.read(str)(formats, ManifestFactory.manifestOf(realClass))
    println(obj)
  }

  def main(args: Array[String]): Unit = {
    testRPC2(args)
  }
} 
Example 13
Source File: BrowseResult.scala    From algoliasearch-client-scala   with MIT License
package algolia.responses

import org.json4s.{DefaultFormats, JObject}

case class BrowseResult(
    cursor: Option[String],
    hits: Seq[JObject],
    processingTimeMS: Int,
    query: String,
    params: String,
    nbHits: Option[Int],
    page: Option[Int],
    histPerPage: Option[Int],
    nbPages: Option[Int]
) {

  implicit val formats: DefaultFormats = org.json4s.DefaultFormats

  def asHit[T <: Hit: Manifest]: Seq[T] = hits.map(_.extract[T])

  def as[T: Manifest]: Seq[T] = hits.map(_.extract[T])

  def asWithObjectID[T <: ObjectID: Manifest]: Seq[T] = hits.map(_.extract[T])

} 
Example 14
Source File: ScoresApiParser.scala    From avoin-voitto   with MIT License
package liigavoitto.scores

import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import liigavoitto.util.{ DateTimeNoMillisSerializer, Logging }

import scala.util.{ Failure, Success, Try }

case class Data(data: List[Sport])
case class Sport(id: String, series: List[Series])
case class Series(id: String, seasons: List[Season])
case class Season(id: String, stages: List[Stage])
case class Stage(id: String, matches: Option[List[Match]],
  standing: Option[List[LeagueTableEntry]],
  playerStatistics: Option[List[PlayerStatsEntry]])

case class Match(id: String, name: String, date: DateTime, status: String, teams: List[Team], feed: List[Feed] = List(), stats: GeneralMatchStats)
case class Feed(`type`: String, gameTime: Option[String], period: Option[String], player: Option[FeedPlayer], standing: Option[String], team: Option[Team], goalType: Option[String], saves: Option[String], timeInMins: Option[String], text: Option[String], beginTime: Option[String], endTime: Option[String])
case class FeedPlayer(id: String, name: PlayerName, meta: Option[PlayerMeta])
case class Team(id: String, name: String, abbr: String, meta: Meta, score: Option[Score], players: List[Player])
case class Score(now: Int, periods: List[Period], outcome: Outcome)
case class Period(id: String, score: Int)
case class Meta(images: List[Image], directives: Option[Map[String, Any]])
case class Image(id: String, imageType: String)
case class Outcome(wins: Int, draws: Int, losses: Int, otWins: Int, otLosses: Int)

case class Player(id: String, name: PlayerName, position: Option[String], specific: Map[String, Any], meta: Option[PlayerMeta])
case class PlayerName(first: String, last: String)
case class PlayerMeta(gender: Option[String] = None, country: Option[String] = None, tags: List[String] = Nil, directives: Map[String, String] = Map.empty)

case class LeagueTableEntry(
  team: Team,
  home: Option[LeagueTableResult] = None,
  away: Option[LeagueTableResult] = None,
  total: Option[LeagueTableResult] = None,
  specific: Option[Map[String, Any]] = None
)
case class LeagueTableResult(gamesPlayed: Int, outcome: Outcome, goals: Goals, points: Option[Int] = None, specific: Option[Map[String, Any]] = None)
case class Goals(score: Int, conceded: Int)

case class PlayerStatsEntry(player: PlayerStatsPlayer, teamStats: List[PlayerTeamStatsEntry])
case class PlayerTeamStatsEntry(team: Team, points: Int, goals: Int, assists: Int)
case class PlayerStatsPlayer(id: String, name: PlayerName)

case class GeneralMatchStats(attendance: Int)

trait ScoresApiParser extends Logging {
  implicit val formats = DefaultFormats + DateTimeNoMillisSerializer

  def parseMatchSport(json: String): Option[Sport] = extractData(json).map(_.head)
  def parseMatchList(json: String): Option[List[Match]] = extractData(json) match {
    case Some(sports) => if (sports.nonEmpty) Some(extractMatchesFromSport(sports.head).get) else Some(List())
    case None => None
  }
  def parseLeagueTable(json: String): Option[List[LeagueTableEntry]] = {
    extractData(json) match {
      case Some(sports) => if (sports.nonEmpty) Some(extractLeagueTableFromSport(sports.head).get) else Some(List())
      case None => None
    }
  }
  def parsePlayerStats(json: String): Option[List[PlayerStatsEntry]] = {
    extractData(json) match {
      case Some(sports) =>
        if (sports.nonEmpty)
          Some(extractPlayerStatsFromSport(sports.head).get)
        else Some(List())
      case None => None
    }
  }

  protected def extractMatchesFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.matches
  protected def extractLeagueTableFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.standing
  protected def extractPlayerStatsFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.playerStatistics
  protected def extractData(json: String) = {
    Try {
      log.debug(s"Sport JSON: $json")
      parse(json).extract[Data]
    } match {
      case Success(s) => Some(s.data)
      case Failure(e) =>
        log.info(s"Failed to parse '$json': " + e)
        None
    }
  }
} 
Example 15
Source File: Metadata.scala    From spark-ml-serving   with Apache License 2.0
package io.hydrosphere.spark_ml_serving.common

case class Metadata(
  `class`: String,
  timestamp: Long,
  sparkVersion: String,
  uid: String,
  paramMap: Map[String, Any],
  numFeatures: Option[Int] = None,
  numClasses: Option[Int]  = None,
  numTrees: Option[Int]    = None
) {
  def getAs[T](name: String): Option[T] = {
    paramMap.get(name).map(_.asInstanceOf[T])
  }

  def inputCol = getAs[String]("inputCol")

  def outputCol = getAs[String]("outputCol")
}

object Metadata {

  import org.json4s.DefaultFormats
  import org.json4s.jackson.JsonMethods._

  implicit val formats = DefaultFormats

  def fromJson(json: String): Metadata = {
    parse(json).extract[Metadata]
  }
} 
Example 16
Source File: Json4sSerializer.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.akkapersistence.json4s

import java.nio.ByteBuffer
import java.nio.charset.Charset

import akka.actor.ExtendedActorSystem
import akka.serialization.Serializer
import org.json4s.native.Serialization._
import org.json4s.{DefaultFormats, Formats, TypeHints}
import rhttpc.transport.json4s.{AllTypeHints, ObjectSerializer}

class Json4sSerializer(system: ExtendedActorSystem) extends Serializer {
  import Json4sSerializer._
  import rhttpc.transport.json4s.CommonFormats._

  override def identifier: Int = ID

  override def includeManifest: Boolean = true

  override def fromBinary(bytes: Array[Byte], manifestOpt: Option[Class[_]]): AnyRef = {
    implicit val manifest = manifestOpt match {
      case Some(x) => Manifest.classType(x)
      case None    => Manifest.AnyRef
    }
    read(new String(bytes, UTF8))
  }

  override def toBinary(o: AnyRef): Array[Byte] = {
    writePretty(o).getBytes(UTF8)
  }
}

object Json4sSerializer {
  private val UTF8: Charset = Charset.forName("UTF-8")
  private val ID: Int = ByteBuffer.wrap("json4s".getBytes(UTF8)).getInt
} 
Example 17
Source File: CommonFormats.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.json4s

import org.json4s.{DefaultFormats, Formats, TypeHints}

object CommonFormats {

  implicit val formats: Formats =
    new DefaultFormats {
      override def dateFormatter = DefaultFormats.losslessDate()
      override val typeHints: TypeHints = AllTypeHints
      override val strictOptionParsing: Boolean = true
    } +
    ExceptionSerializer +
    ObjectSerializer +
    IndexedSeqSerializer

} 
Example 18
Source File: ExceptionSerializerSpec.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.json4s

import org.json4s.{DefaultFormats, TypeHints}
import org.scalatest.{FlatSpec, Matchers, TryValues}

class ExceptionSerializerSpec extends FlatSpec with Matchers with TryValues {

  it should "round-trip serialize case class exception" in {
    roundTrip(CaseClassException(123))
  }

  it should "round-trip serialize exception with message" in {
    roundTrip(new ExceptionWithMessage("foo"))
  }

  it should "round-trip serialize exception with null message" in {
    roundTrip(new ExceptionWithMessage(null))
  }

  it should "round-trip serialize exception with message and cause" in {
    roundTrip(new ExceptionWithMessageAndCause("foo", CaseClassException(123)))
  }

  private def roundTrip(ex: Throwable): Unit = {
    implicit val formats = new DefaultFormats {
      override val typeHints: TypeHints = AllTypeHints
    } + ExceptionSerializer
    val serializer = new Json4sSerializer[Throwable]()
    val deserializer = new Json4sDeserializer[Throwable]()
    val serialized = serializer.serialize(ex)
    val deserialized = deserializer.deserialize(serialized)
    deserialized.success.value shouldEqual ex
  }

}

case class CaseClassException(x: Int) extends Exception(s"x: $x")

class ExceptionWithMessage(msg: String) extends Exception(msg) {
  def canEqual(other: Any): Boolean = other.isInstanceOf[ExceptionWithMessage]

  override def equals(other: Any): Boolean = other match {
    case that: ExceptionWithMessage =>
      (that canEqual this) &&
        getMessage == that.getMessage
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(getMessage)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}

class ExceptionWithMessageAndCause(msg: String, cause: Throwable) extends Exception(msg, cause) {
  def canEqual(other: Any): Boolean = other.isInstanceOf[ExceptionWithMessageAndCause]

  override def equals(other: Any): Boolean = other match {
    case that: ExceptionWithMessageAndCause =>
      (that canEqual this) &&
        getMessage == that.getMessage &&
        getCause == that.getCause
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(getMessage, getCause)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
} 
Example 19
Source File: EventStatsServlet.scala    From spark-streaming-demo   with Apache License 2.0
package com.datastax.examples.meetup

import org.joda.time.{DateTimeZone, DateTime, Duration}
import org.scalatra.scalate.ScalateSupport
import org.scalatra.{CorsSupport, ScalatraServlet}

import scala.concurrent.Await
import scala.concurrent.duration._
import org.json4s.{DefaultFormats, Formats}
import org.scalatra.json._

class EventStatsServlet() extends ScalatraServlet with CorsSupport with JacksonJsonSupport with ScalateSupport
{
  protected implicit val jsonFormats: Formats = DefaultFormats

  before() {
    contentType = formats("json")
  }

  options("/*"){
    response.setHeader("Access-Control-Allow-Headers", request.getHeader("Access-Control-Request-Headers"));
  }

  get("/trending") {
    val time = new DateTime(DateTimeZone.UTC)

    // Scan 5 second intervals within the past 1 minute.
    // Stop as soon as the first non-empty result is found.
    val result = (for (i <- Stream range (0,12); v = getTrendingTopics(i, time); if v.nonEmpty) yield v).headOption

    // Order topics by count in desc order and take top 20
    result.map(r => r.toIndexedSeq.sortBy(_._2).reverse.take(20))
  }

  get("/countries") {
    val attendeesByCountry = Event.dimensions("attending", "ALL")

    Await.result(attendeesByCountry, 5 seconds)
      .map{ case (a,b) => Map("code" -> a.toUpperCase, "value" -> b)}
  }

  get("/") {
    contentType="text/html"
    layoutTemplate("dashboard.ssp")
  }

  def roundDateTime(t: DateTime, d: Duration) = {
    t minus (t.getMillis - (t.getMillis.toDouble / d.getMillis).round * d.getMillis)
  }

  def getTrendingTopics(i:Int, time:DateTime) = {
    val t = roundDateTime(time minusSeconds 5*i, Duration.standardSeconds(5))
    val trendingTopics = Event.dimensions("trending", "S" + t.toString("yyyyMMddHHmmss"))
    Await.result(trendingTopics, 5 seconds)
  }
} 
Example 20
Source File: DrilldownConfig.scala    From maha   with Apache License 2.0
package com.yahoo.maha.service.curators

import org.json4s.DefaultFormats
import org.json4s.scalaz.JsonScalaz._
import com.yahoo.maha.core.request._
import com.yahoo.maha.service.MahaServiceConfig
import com.yahoo.maha.service.factory._
import org.json4s.JValue
import org.json4s.scalaz.JsonScalaz


object DrilldownConfig {
  val MAXIMUM_ROWS : BigInt = 1000
  val DEFAULT_ENFORCE_FILTERS : Boolean = true


  implicit val formats: DefaultFormats.type = DefaultFormats

  def parse(curatorJsonConfig: CuratorJsonConfig) : JsonScalaz.Result[DrilldownConfig] = {
    import _root_.scalaz.syntax.validation._

    val config: JValue = curatorJsonConfig.json

    val dimension : Field = assignDim(config)

    val maxRows : BigInt = assignMaxRows(config)

    val enforceFilters : Boolean = assignEnforceFilters(config)

    val ordering : IndexedSeq[SortBy] = assignOrdering(config)

    val cube : String = assignCube(config, "")

    DrilldownConfig(enforceFilters, dimension, cube, ordering, maxRows).successNel
  }

  private def assignCube(config: JValue, default: String) : String = {
    val cubeResult : MahaServiceConfig.MahaConfigResult[String] = fieldExtended[String]("cube")(config)
    if (cubeResult.isSuccess) {
      cubeResult.toOption.get
    }
    else{
      default
    }
  }

  private def assignDim(config: JValue): Field = {
    val drillDim : MahaServiceConfig.MahaConfigResult[String] = fieldExtended[String]("dimension")(config)
    require(drillDim.isSuccess, "CuratorConfig for a DrillDown should have a dimension declared!")
    Field(drillDim.toOption.get, None, None)
  }

  private def assignMaxRows(config: JValue): BigInt = {
    val maxRowsLimitResult : MahaServiceConfig.MahaConfigResult[Int] = fieldExtended[Int]("mr")(config)
    if(maxRowsLimitResult.isSuccess) {
      maxRowsLimitResult.toOption.get
    }
    else{
      MAXIMUM_ROWS
    }
  }

  private def assignEnforceFilters(config: JValue): Boolean = {
    val enforceFiltersResult : MahaServiceConfig.MahaConfigResult[Boolean] = fieldExtended[Boolean]("enforceFilters")(config)
    if(enforceFiltersResult.isSuccess)
      enforceFiltersResult.toOption.get
    else{
      DEFAULT_ENFORCE_FILTERS
    }
  }

  private def assignOrdering(config: JValue): IndexedSeq[SortBy] = {
    val orderingResult : MahaServiceConfig.MahaConfigResult[List[SortBy]] = fieldExtended[List[SortBy]]("ordering")(config)
    if(orderingResult.isSuccess){
      orderingResult.toOption.get.toIndexedSeq
    }else {
      if(orderingResult.toEither.left.get.toString().contains("order must be asc|desc not")){
        throw new IllegalArgumentException (orderingResult.toEither.left.get.head.message)
      }
      else{
        IndexedSeq.empty
      }
    }
  }
}

case class DrilldownConfig(enforceFilters: Boolean,
                            dimension: Field,
                            cube: String,
                            ordering: IndexedSeq[SortBy],
                            maxRows: BigInt) extends CuratorConfig 
Example 21
Source File: VatReturnDeclaration.scala    From vat-api   with Apache License 2.0
package uk.gov.hmrc.vatapi.models.des

import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import org.json4s.JsonAST._
import org.json4s.native.Serialization
import org.json4s.{CustomSerializer, DefaultFormats}
import uk.gov.hmrc.vatapi.models.Amount

object VatReturnDeclaration {
}

case class VatReturnDeclaration(
                                 periodKey: String,
                                 vatDueSales: Amount,
                                 vatDueAcquisitions: Amount,
                                 vatDueTotal: Amount,
                                 vatReclaimedCurrPeriod: Amount,
                                 vatDueNet: Amount,
                                 totalValueSalesExVAT: Amount,
                                 totalValuePurchasesExVAT: Amount,
                                 totalValueGoodsSuppliedExVAT: Amount,
                                 totalAllAcquisitionsExVAT: Amount,
                                 agentReferenceNumber: Option[String] = None,
                                 receivedAt: DateTime
                               ) {
  def toJsonString: String = {
    implicit val formats = DefaultFormats ++ Seq(BigDecimalSerializer) ++ Seq(JodaSerializer)
    Serialization.write(this)
  }
}


private object BigDecimalSerializer extends CustomSerializer[Amount](format => ( {
  case jde: JDecimal => jde.num
}, {
  case bd: Amount => JDecimal(bd.setScale(2))
}
))

private object JodaSerializer extends CustomSerializer[DateTime](format => ( {
  case js: JString => DateTime.parse(js.s)
}, {
  case dt: DateTime => {
    val fmt = ISODateTimeFormat.dateTime()
    JString(dt.toString(fmt))
  }
}
)) 
Example 22
Source File: KinesisRDDWriter.scala    From aws-kinesis-scala   with Apache License 2.0
package jp.co.bizreach.kinesis.spark

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis._
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.TaskContext
import org.json4s.jackson.JsonMethods
import org.json4s.{DefaultFormats, Extraction, Formats}
import org.slf4j.LoggerFactory

class KinesisRDDWriter[A <: AnyRef](streamName: String, region: Regions,
                                    credentials: SparkAWSCredentials,
                                    chunk: Int, endpoint: Option[String]) extends Serializable {
  private val logger = LoggerFactory.getLogger(getClass)

  def write(task: TaskContext, data: Iterator[A]): Unit = {
    // send data, including retry
    def put(a: Seq[PutRecordsEntry]) = endpoint.map(e => KinesisRDDWriter.endpointClient(credentials)(e)(region))
      .getOrElse(KinesisRDDWriter.client(credentials)(region))
      .putRecordsWithRetry(PutRecordsRequest(streamName, a))
      .zipWithIndex.collect { case (Left(e), i) => a(i) -> s"${e.errorCode}: ${e.errorMessage}" }

    val errors = data.foldLeft(
      (Nil: Seq[PutRecordsEntry], Nil: Seq[(PutRecordsEntry, String)])
    ){ (z, x) =>
      val (records, failed) = z
      val payload = serialize(x)
      val entry   = PutRecordsEntry(DigestUtils.sha256Hex(payload), payload)

      // record exceeds max size
      if (entry.recordSize > recordMaxDataSize)
        records -> ((entry -> "per-record size limit") +: failed)

      // execute
      else if (records.size >= chunk || (records.map(_.recordSize).sum + entry.recordSize) >= recordsMaxDataSize)
        (entry +: Nil) -> (put(records) ++ failed)

      // buffering
      else
        (entry +: records) -> failed
    } match {
      case (Nil, e)  => e
      case (rest, e) => put(rest) ++ e
    }

    // failed records
    if (errors.nonEmpty) dump(errors)
  }

  protected def dump(errors: Seq[(PutRecordsEntry, String)]): Unit =
    logger.error(
      s"""Could not put record, count: ${errors.size}, following details:
         |${errors map { case (entry, message) => message + "\n" + new String(entry.data, "UTF-8") } mkString "\n"}
       """.stripMargin)

  protected def serialize(a: A)(implicit formats: Formats = DefaultFormats): Array[Byte] =
    JsonMethods.mapper.writeValueAsBytes(Extraction.decompose(a)(formats))

}

object KinesisRDDWriter {
  private val cache = collection.concurrent.TrieMap.empty[Regions, AmazonKinesis]


  private val client: SparkAWSCredentials => Regions => AmazonKinesis = {
    credentials => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider))
  }

  private val endpointClient: SparkAWSCredentials => String => Regions => AmazonKinesis = {
    credentials => endpoint => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider, new EndpointConfiguration(endpoint, region.getName)))
  }

} 
Example 23
Source File: L10-9Graph.scala    From prosparkstreaming   with Apache License 2.0
package org.apress.prospark

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.graphx.Edge
import org.apache.spark.graphx.Graph
import org.apache.spark.graphx.Graph.graphToGraphOps
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object UserRankApp {

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: UserRankApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    ssc.socketTextStream(hostname, port.toInt)
      .map(r => {
        implicit val formats = DefaultFormats
        parse(r)
      })
      .foreachRDD(rdd => {
        val edges = rdd.map(jvalue => {
          implicit val formats = DefaultFormats
          ((jvalue \ "user_id").extract[String], (jvalue \ "friends").extract[Array[String]])
        })
          .flatMap(r => r._2.map(f => Edge(r._1.hashCode.toLong, f.hashCode.toLong, 1.0)))

        val vertices = rdd.map(jvalue => {
          implicit val formats = DefaultFormats
          ((jvalue \ "user_id").extract[String])
        })
          .map(r => (r.hashCode.toLong, r))

        val tolerance = 0.0001
        val graph = Graph(vertices, edges, "defaultUser")
          .subgraph(vpred = (id, idStr) => idStr != "defaultUser")
        val pr = graph.pageRank(tolerance).cache

        graph.outerJoinVertices(pr.vertices) {
          (userId, attrs, rank) => (rank.getOrElse(0.0).asInstanceOf[Number].doubleValue, attrs)
        }.vertices.top(10) {
          Ordering.by(_._2._1)
        }.foreach(rec => println("User id: %s, Rank: %f".format(rec._2._2, rec._2._1)))
      })

    ssc.start()
    ssc.awaitTermination()

  }

} 
Example 24
Source File: L10-2DataProc.scala    From prosparkstreaming   with Apache License 2.0
package org.apress.prospark

import org.apache.spark.HashPartitioner
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JNothing
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object DataProcApp {

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: DataProcApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    ssc.socketTextStream(hostname, port.toInt)
      .map(r => {
        implicit val formats = DefaultFormats
        parse(r)
      })
      .filter(jvalue => {
        jvalue \ "attributes" \ "Wi-Fi" != JNothing
      })
      .map(jvalue => {
        implicit val formats = DefaultFormats
        ((jvalue \ "attributes" \ "Wi-Fi").extract[String], (jvalue \ "stars").extract[Int])
      })
      .combineByKey(
        (v) => (v, 1),
        (accValue: (Int, Int), v) => (accValue._1 + v, accValue._2 + 1),
        (accCombine1: (Int, Int), accCombine2: (Int, Int)) => (accCombine1._1 + accCombine2._1, accCombine1._2 + accCombine2._2),
        new HashPartitioner(ssc.sparkContext.defaultParallelism))
      .map({ case (k, v) => (k, v._1 / v._2.toFloat) })
      .print()

    ssc.start()
    ssc.awaitTermination()
  }

} 
Example 25
Source File: L5-18Http.scala    From prosparkstreaming   with Apache License 2.0
package org.apress.prospark

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object HttpApp {

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println(
        "Usage: HttpApp <appname> <outputPath>")
      System.exit(1)
    }

    val Seq(appName, outputPath) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://www.citibikenyc.com/stations/json", interval = batchInterval)
      .flatMap(rec => (parse(rec) \ "stationBeanList").children)
      .filter(rec => {
        implicit val formats = DefaultFormats
        (rec \ "statusKey").extract[Integer] != 1
      })
      .map(rec => rec.filterField {
        case JField("id", _) => true
        case JField("stationName", _) => true
        case JField("statusValue", _) => true
        case _ => false
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        (rec(0)._2.extract[Integer], rec(1)._2.extract[String], rec(2)._2.extract[String])
      })
      .saveAsTextFiles(outputPath)

    ssc.start()
    ssc.awaitTermination()
  }

} 
Example 26
Source File: L8-3-6-7DataFrameCreation.scala    From prosparkstreaming   with Apache License 2.0
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.functions.desc
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.native.Serialization.write
import org.json4s.DefaultFormats

object DataframeCreationApp {

  case class Cdr(squareId: Int, timeInterval: Long, countryCode: Int,
    smsInActivity: Float, smsOutActivity: Float, callInActivity: Float,
    callOutActivity: Float, internetTrafficActivity: Float)

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: CdrDataframeApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    val sqlC = new SQLContext(ssc.sparkContext)
    import sqlC.implicits._

    val cdrStream = ssc.socketTextStream(hostname, port.toInt)
      .map(_.split("\\t", -1))
      .foreachRDD(rdd => {
        //val cdrs = sqlC.createDataFrame(seqToCdr(rdd))
        //val cdrs = sqlC.createDataFrame(seqToCdr(rdd).collect())
        //val cdrs = seqToCdr(rdd).toDF()
        val cdrsJson = seqToCdr(rdd).map(r => {
          implicit val formats = DefaultFormats
          write(r)
        })
        val cdrs = sqlC.read.json(cdrsJson)

        cdrs.groupBy("countryCode").count().orderBy(desc("count")).show(5)
      })

    ssc.start()
    ssc.awaitTermination()

  }

  def seqToCdr(rdd: RDD[Array[String]]): RDD[Cdr] = {
    rdd.map(c => c.map(f => f match {
      case x if x.isEmpty() => "0"
      case x => x
    })).map(c => Cdr(c(0).toInt, c(1).toLong, c(2).toInt, c(3).toFloat,
      c(4).toFloat, c(5).toFloat, c(6).toFloat, c(7).toFloat))
  }
} 
Example 27
Source File: L8-29DataFrameExamplesJoin.scala    From prosparkstreaming   with Apache License 2.0
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JDouble
import org.json4s.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.compact
import org.json4s.native.JsonMethods.parse
import org.json4s.native.JsonMethods.render
import org.json4s.string2JsonInput

object CdrDataframeExamples3App {

  case class Cdr(squareId: Int, timeInterval: Long, countryCode: Int,
    smsInActivity: Float, smsOutActivity: Float, callInActivity: Float,
    callOutActivity: Float, internetTrafficActivity: Float)

  def main(args: Array[String]) {
    if (args.length != 5) {
      System.err.println(
        "Usage: CdrDataframeExamples3App <appname> <batchInterval> <hostname> <port> <gridJsonPath>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port, gridJsonPath) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    val sqlC = new SQLContext(ssc.sparkContext)
    import sqlC.implicits._
    implicit val formats = DefaultFormats

    val gridFile = scala.io.Source.fromFile(gridJsonPath).mkString
    val gridGeo = (parse(gridFile) \ "features")
    val gridStr = gridGeo.children.map(r => {
      val c = (r \ "geometry" \ "coordinates").extract[List[List[List[Float]]]].flatten.flatten.map(r => JDouble(r))
      val l = List(("id", r \ "id"), ("x1", c(0)), ("y1", c(1)), ("x2", c(2)), ("y2", c(3)),
        ("x3", c(4)), ("y3", c(5)), ("x4", c(6)), ("y4", c(7)))
      compact(render(JObject(l)))
    })

    val gridDF = sqlC.read.json(ssc.sparkContext.makeRDD(gridStr))

    val cdrStream = ssc.socketTextStream(hostname, port.toInt)
      .map(_.split("\\t", -1))
      .foreachRDD(rdd => {
        val cdrs = seqToCdr(rdd).toDF()
        cdrs.join(gridDF, $"squareId" === $"id").show()
      })

    ssc.start()
    ssc.awaitTermination()
  }

  def seqToCdr(rdd: RDD[Array[String]]): RDD[Cdr] = {
    rdd.map(c => c.map(f => f match {
      case x if x.isEmpty() => "0"
      case x => x
    })).map(c => Cdr(c(0).toInt, c(1).toLong, c(2).toInt, c(3).toFloat,
      c(4).toFloat, c(5).toFloat, c(6).toFloat, c(7).toFloat))
  }
} 
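Example 27 flattens each GeoJSON grid cell into a plain JSON object before handing it to sqlC.read.json. The json4s construction step can be exercised on its own; this is a minimal sketch with made-up coordinates and only two corner points instead of four.

import org.json4s.DefaultFormats
import org.json4s.JDouble
import org.json4s.JInt
import org.json4s.JObject
import org.json4s.native.JsonMethods.compact
import org.json4s.native.JsonMethods.render

object GridCellJsonSketch {

  def main(args: Array[String]) {
    implicit val formats = DefaultFormats

    // Build a flat JObject from (name, JValue) pairs, like the grid flattening step above.
    val fields = List(
      ("id", JInt(1)),
      ("x1", JDouble(9.01)), ("y1", JDouble(45.35)),
      ("x2", JDouble(9.02)), ("y2", JDouble(45.36)))
    val json = compact(render(JObject(fields)))
    println(json) // {"id":1,"x1":9.01,"y1":45.35,"x2":9.02,"y2":45.36}
  }
} 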
Example 28
Source File: T8-5-L8-30-34DataFrameExamplesActions.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.functions.desc
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apress.prospark.CdrDataframeExamplesActionsApp.Cdr
import org.json4s.DefaultFormats

object CdrDataframeExamplesActionsApp {

  case class Cdr(squareId: Int, timeInterval: Long, countryCode: Int,
    smsInActivity: Float, smsOutActivity: Float, callInActivity: Float,
    callOutActivity: Float, internetTrafficActivity: Float)

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: CdrDataframeExamplesActionsApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    val cl = Thread.currentThread().getContextClassLoader()
    val hiveC = new HiveContext(ssc.sparkContext)
    Thread.currentThread().setContextClassLoader(cl)
    import hiveC.implicits._
    implicit val formats = DefaultFormats

    val cdrStream = ssc.socketTextStream(hostname, port.toInt)
      .map(_.split("\\t", -1))
      .foreachRDD(rdd => {
        val cdrs = seqToCdr(rdd).toDF()

        val counts = cdrs.groupBy("countryCode").count().orderBy(desc("count"))
        counts.show(5)
        counts.show()
        println("head(5): " + counts.head(5))
        println("take(5): " + counts.take(5))
        println("head(): " + counts.head())
        println("first(5): " + counts.first())
        println("count(): " + counts.count())
        println("collect(): " + counts.collect())
        println("collectAsList(): " + counts.collectAsList())
        println("describe(): " + cdrs.describe("smsInActivity", "smsOutActivity", "callInActivity", "callOutActivity", "internetTrafficActivity").show())
        counts.write.format("parquet").save("/tmp/parquent" + rdd.id)
        counts.write.format("json").save("/tmp/json" + rdd.id)
        counts.write.parquet("/tmp/parquent2" + rdd.id)
        counts.write.json("/tmp/json2" + rdd.id)
        counts.write.saveAsTable("count_table")
        cdrs.groupBy("countryCode").count().orderBy(desc("count")).write.mode(SaveMode.Append).save("/tmp/counts")
        val prop: java.util.Properties = new java.util.Properties()
        counts.write.jdbc("jdbc:mysql://hostname:port/cdrsdb", "count_table", prop)
      })

    ssc.start()
    ssc.awaitTermination()
  }

  def seqToCdr(rdd: RDD[Array[String]]): RDD[Cdr] = {
    rdd.map(c => c.map(f => f match {
      case x if x.isEmpty() => "0"
      case x => x
    })).map(c => Cdr(c(0).toInt, c(1).toLong, c(2).toInt, c(3).toFloat,
      c(4).toFloat, c(5).toFloat, c(6).toFloat, c(7).toFloat))
  }
} 
Example 29
Source File: T8-3DataFrameExamplesNA.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JDouble
import org.json4s.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.compact
import org.json4s.native.JsonMethods.parse
import org.json4s.native.JsonMethods.render
import org.json4s.string2JsonInput

object CdrDataframeExamplesNAApp {

  case class Cdr(squareId: Int, timeInterval: Long, countryCode: Int,
    smsInActivity: Float, smsOutActivity: Float, callInActivity: Float,
    callOutActivity: Float, internetTrafficActivity: Float)

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: CdrDataframeExamplesNAApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    val sqlC = new SQLContext(ssc.sparkContext)
    import sqlC.implicits._
    implicit val formats = DefaultFormats

    val cdrStream = ssc.socketTextStream(hostname, port.toInt)
      .map(_.split("\\t", -1))
      .foreachRDD(rdd => {
        val cdrs = seqToCdr(rdd).toDF()
        cdrs.na.drop("any").show()
        cdrs.na.fill(0, Array("squareId")).show()
        cdrs.na.replace("squareId", Map(0 -> 1)).show()
        println("Correlation: " + cdrs.stat.corr("smsOutActivity", "callOutActivity"))
        println("Covariance: " + cdrs.stat.cov("smsInActivity", "callInActivity"))
        cdrs.stat.crosstab("squareId", "countryCode").show()
        cdrs.stat.freqItems(Array("squareId", "countryCode"), 0.1).show()
        cdrs.stat.crosstab("callOutActivity", "callInActivity").show()
      })

    ssc.start()
    ssc.awaitTermination()
  }

  def seqToCdr(rdd: RDD[Array[String]]): RDD[Cdr] = {
    rdd.map(c => c.map(f => f match {
      case x if x.isEmpty() => "0"
      case x => x
    })).map(c => Cdr(c(0).toInt, c(1).toLong, c(2).toInt, c(3).toFloat,
      c(4).toFloat, c(5).toFloat, c(6).toFloat, c(7).toFloat))
  }
} 
Example 30
Source File: L8-35DataFrameExamplesRDD.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.types.StructType
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats

object CdrDataframeExamplesRDDApp {

  case class Cdr(squareId: Int, timeInterval: Long, countryCode: Int,
    smsInActivity: Float, smsOutActivity: Float, callInActivity: Float,
    callOutActivity: Float, internetTrafficActivity: Float)

  def main(args: Array[String]) {
    if (args.length != 5) {
      System.err.println(
        "Usage: CdrDataframeExamplesRDDApp <appname> <batchInterval> <hostname> <schemaPath>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port, schemaFile) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    val sqlC = new SQLContext(ssc.sparkContext)
    import sqlC.implicits._
    implicit val formats = DefaultFormats

    val schemaJson = scala.io.Source.fromFile(schemaFile).mkString
    val schema = DataType.fromJson(schemaJson).asInstanceOf[StructType]

    val cdrStream = ssc.socketTextStream(hostname, port.toInt)
      .map(_.split("\\t", -1))
      .foreachRDD(rdd => {
        val cdrs = seqToCdr(rdd).toDF()
        val highInternet = sqlC.createDataFrame(cdrs.rdd.filter(r => r.getFloat(3) + r.getFloat(4) >= r.getFloat(5) + r.getFloat(6)), schema)
        val highOther = cdrs.except(highInternet)
        val highInternetGrid = highInternet.select("squareId", "countryCode").dropDuplicates()
        val highOtherGrid = highOther.select("squareId", "countryCode").dropDuplicates()
        highOtherGrid.except(highInternetGrid).show()
        highInternetGrid.except(highOtherGrid).show()
      })

    ssc.start()
    ssc.awaitTermination()
  }

  def seqToCdr(rdd: RDD[Array[String]]): RDD[Cdr] = {
    rdd.map(c => c.map(f => f match {
      case x if x.isEmpty() => "0"
      case x => x
    })).map(c => Cdr(c(0).toInt, c(1).toLong, c(2).toInt, c(3).toFloat,
      c(4).toFloat, c(5).toFloat, c(6).toFloat, c(7).toFloat))
  }
} 
Example 31
Source File: L6-6PerRecord.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object MqttSinkAppB {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        rdd.foreach { rec =>
          {
            val client = new MqttClient(outputBrokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
            client.connect()
            client.publish(topic, new MqttMessage(rec.getBytes(StandardCharsets.UTF_8)))
            client.disconnect()
            client.close()
          }
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }

} 
Example 32
Source File: L6-12StaticPool.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object MqttSinkAppF {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    val mqttSink = ssc.sparkContext.broadcast(MqttSinkLazy(outputBrokerUrl))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        rdd.foreachPartition { par =>
          par.foreach(message => mqttSink.value.client.publish(topic, new MqttMessage(message.getBytes(StandardCharsets.UTF_8))))
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }

}

class MqttSinkLazy(brokerUrl: String) extends Serializable {
  lazy val client = {
    val client = new MqttClient(brokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
    client.connect()
    sys.addShutdownHook {
      client.disconnect()
      client.close()
    }
    client
  }
}

object MqttSinkLazy {
  val brokerUrl = "tcp://localhost:1883"
  val client = new MqttSinkLazy(brokerUrl)

  def apply(brokerUrl: String): MqttSinkLazy = {
    client
  }
} 
Example 33
Source File: L6-8Static.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object MqttSinkAppD {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        rdd.foreachPartition { par =>
          par.foreach(message => MqttSink().publish(topic, new MqttMessage(message.getBytes(StandardCharsets.UTF_8))))
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }
}

object MqttSink {
  val brokerUrl = "tcp://localhost:1883"
  val client = new MqttClient(brokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
  client.connect()
  sys.addShutdownHook {
    client.disconnect()
    client.close()
  }

  def apply(): MqttClient = {
    client
  }
} 
Example 34
Source File: L6-18Cassandra.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Text
import java.nio.ByteBuffer
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat
import org.apache.cassandra.hadoop.ConfigHelper
import org.apache.cassandra.thrift.ColumnOrSuperColumn
import org.apache.cassandra.thrift.Column
import org.apache.cassandra.utils.ByteBufferUtil
import org.apache.cassandra.thrift.Mutation
import java.util.Arrays

object CassandraSinkApp {

  def main(args: Array[String]) {
    if (args.length != 6) {
      System.err.println(
        "Usage: CassandraSinkApp <appname> <cassandraHost> <cassandraPort> <keyspace> <columnFamilyName> <columnName>")
      System.exit(1)
    }

    val Seq(appName, cassandraHost, cassandraPort, keyspace, columnFamilyName, columnName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10
    val windowSize = 20
    val slideInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat))
      })
      .reduceByKeyAndWindow((x: Float, y: Float) => (x + y), Seconds(windowSize), Seconds(slideInterval))
      .foreachRDD(rdd => {
        val jobConf = new Configuration()
        ConfigHelper.setOutputRpcPort(jobConf, cassandraPort)
        ConfigHelper.setOutputInitialAddress(jobConf, cassandraHost)
        ConfigHelper.setOutputColumnFamily(jobConf, keyspace, columnFamilyName)
        ConfigHelper.setOutputPartitioner(jobConf, "Murmur3Partitioner")
        rdd.map(rec => {
          val c = new Column()
          c.setName(ByteBufferUtil.bytes(columnName))
          c.setValue(ByteBufferUtil.bytes(rec._2 / (windowSize / batchInterval)))
          c.setTimestamp(System.currentTimeMillis)
          val m = new Mutation()
          m.setColumn_or_supercolumn(new ColumnOrSuperColumn())
          m.column_or_supercolumn.setColumn(c)
          (ByteBufferUtil.bytes(rec._1), Arrays.asList(m))
        }).saveAsNewAPIHadoopFile(keyspace, classOf[ByteBuffer], classOf[List[Mutation]], classOf[ColumnFamilyOutputFormat], jobConf)
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
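Example 34 and the later stock examples all share the same json4s step: navigate the YQL payload with the \ operator and extract typed fields under DefaultFormats. Below is a standalone sketch of that step, with a small hand-written JSON string standing in for the HTTP response.

import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object QuoteExtractionSketch {

  def main(args: Array[String]) {
    implicit val formats = DefaultFormats

    // Trimmed-down stand-in for the YQL finance payload used by the streaming examples.
    val rec = """{"query":{"results":{"quote":[
      {"symbol":"IBM","LastTradePriceOnly":"180.25"},
      {"symbol":"GOOG","LastTradePriceOnly":"520.10"}]}}}"""

    val query = parse(rec) \ "query"
    val quotes = (query \ "results" \ "quote").children
      .map(q => ((q \ "symbol").extract[String], (q \ "LastTradePriceOnly").extract[String].toFloat))
    quotes.foreach { case (sym, price) => println(s"$sym -> $price") }
  }
} 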
Example 35
Source File: L6-20CassandraConnector.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.reflect.runtime.universe

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

import com.datastax.spark.connector.SomeColumns
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.streaming.toDStreamFunctions
import com.datastax.spark.connector.toNamedColumnRef

object CassandraConnectorSinkApp {

  def main(args: Array[String]) {
    if (args.length != 6) {
      System.err.println(
        "Usage: CassandraConnectorSinkApp <appname> <cassandraHost> <cassandraPort> <keyspace> <tableName> <columnName>")
      System.exit(1)
    }

    val Seq(appName, cassandraHost, cassandraPort, keyspace, tableName, columnName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)
      .set("spark.cassandra.connection.host", cassandraHost)
      .set("spark.cassandra.connection.port", cassandraPort)

    val batchInterval = 10
    val windowSize = 20
    val slideInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    CassandraConnector(conf).withSessionDo { session =>
      session.execute(s"CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1 }".format(keyspace))
      session.execute(s"CREATE TABLE IF NOT EXISTS %s.%s (key TEXT PRIMARY KEY, %s FLOAT)".format(keyspace, tableName, columnName))
    }

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat))
      })
      .reduceByKeyAndWindow((x: Float, y: Float) => (x + y), Seconds(windowSize), Seconds(slideInterval))
      .map(stock => (stock._1, stock._2 / (windowSize / batchInterval)))
      .saveToCassandra(keyspace, tableName)

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 36
Source File: L6-5Exception.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object MqttSinkAppA {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        val client = new MqttClient(outputBrokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
        client.connect()
        rdd.foreach(rec => client.publish(topic, new MqttMessage(rec.getBytes(StandardCharsets.UTF_8))))
        client.disconnect()
        client.close()
      }

    ssc.start()
    ssc.awaitTermination()
  }

} 
Example 37
Source File: L6-10LazyStatic.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput
import org.apache.commons.pool2.PooledObject
import org.apache.commons.pool2.BasePooledObjectFactory
import org.apache.commons.pool2.impl.DefaultPooledObject
import org.apache.commons.pool2.impl.GenericObjectPool
import org.apache.commons.pool2.ObjectPool

object MqttSinkAppE {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        rdd.foreachPartition { par =>
          val mqttSink = MqttSinkPool().borrowObject()
          par.foreach(message => mqttSink.publish(topic, new MqttMessage(message.getBytes(StandardCharsets.UTF_8))))
          MqttSinkPool().returnObject(mqttSink)
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }
}

object MqttSinkPool {
  val poolSize = 8
  val brokerUrl = "tcp://localhost:1883"
  val mqttPool = new GenericObjectPool[MqttClient](new MqttClientFactory(brokerUrl))
  mqttPool.setMaxTotal(poolSize)
  sys.addShutdownHook {
    mqttPool.close()
  }
  
  def apply(): GenericObjectPool[MqttClient] = {
    mqttPool
  }
}

class MqttClientFactory(brokerUrl: String) extends BasePooledObjectFactory[MqttClient] {
  override def create() = {
    val client = new MqttClient(brokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
    client.connect()
    client
  }
  override def wrap(client: MqttClient) = new DefaultPooledObject[MqttClient](client)
  override def validateObject(pObj: PooledObject[MqttClient]) = pObj.getObject.isConnected()
  override def destroyObject(pObj: PooledObject[MqttClient]) = {
    pObj.getObject.disconnect()
    pObj.getObject.close()
  }
  override def passivateObject(pObj: PooledObject[MqttClient]) = {}
} 
Example 38
Source File: L6-16SparkHBase.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object SparkHBaseBulkPutApp {

  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: SparkHBaseBulkPutApp <appname> <tableName> <columnFamilyName> <columnName>")
      System.exit(1)
    }

    val Seq(appName, tableName, columnFamilyName, columnName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10
    val windowSize = 20
    val slideInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    val hbaseConf = HBaseConfiguration.create()
    val hContext = new HBaseContext(ssc.sparkContext, hbaseConf)

    val windowed = HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat))
      })
      .reduceByKeyAndWindow((x: Float, y: Float) => (x + y), Seconds(windowSize), Seconds(slideInterval))

    hContext.streamBulkPut[(String, Float)](windowed, TableName.valueOf(tableName), rec => {
      val put = new Put(rec._1.getBytes)
      put.addColumn(columnFamilyName.getBytes, columnName.getBytes, Bytes.toBytes(rec._2 / (windowSize / batchInterval)))
      put
    })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 39
Source File: L6-22Counters.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object StatefulCountersApp {

  def main(args: Array[String]) {
    if (args.length != 1) {
      System.err.println(
        "Usage: StatefulCountersApp <appname>")
      System.exit(1)
    }

    val Seq(appName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))
    
    var globalMax: AtomicLong = new AtomicLong(Long.MinValue)
    var globalMin: AtomicLong = new AtomicLong(Long.MaxValue)
    var globalCounter500: AtomicLong = new AtomicLong(0)

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat, (rec \ "Volume").extract[String].toLong))
      })
      .foreachRDD(rdd => {
        val stocks = rdd.take(10)
        stocks.foreach(stock => {
          val price = stock._2
          val volume = stock._3
          if (volume > globalMax.get()) {
            globalMax.set(volume)
          }
          if (volume < globalMin.get()) {
            globalMin.set(volume)
          }
          if (price > 500) {
            globalCounter500.incrementAndGet()
          }
        })
        if (globalCounter500.get() > 1000L) {
          println("Global counter has reached 1000")
          println("Max ----> " + globalMax.get)
          println("Min ----> " + globalMin.get)
          globalCounter500.set(0)
        }
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 40
Source File: L6-24Accumulators.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.collection.mutable

import org.apache.spark.AccumulableParam
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object StatefulAccumulatorsApp {

  object StockAccum extends AccumulableParam[mutable.HashMap[String, (Long, Long, Long)], (String, (Float, Long))] {
    def zero(t: mutable.HashMap[String, (Long, Long, Long)]): mutable.HashMap[String, (Long, Long, Long)] = {
      new mutable.HashMap[String, (Long, Long, Long)]()
    }
    def addInPlace(t1: mutable.HashMap[String, (Long, Long, Long)], t2: mutable.HashMap[String, (Long, Long, Long)]): mutable.HashMap[String, (Long, Long, Long)] = {
      t1 ++ t2.map {
        case (k, v2) => (k -> {
          val v1 = t1.getOrElse(k, (Long.MaxValue, Long.MinValue, 0L))
          val newMin = if (v2._1 < v1._1) v2._1 else v1._1
          val newMax = if (v2._2 > v1._2) v2._2 else v1._2
          (newMin, newMax, v1._3 + v2._3)
        })
      }
    }
    def addAccumulator(t1: mutable.HashMap[String, (Long, Long, Long)], t2: (String, (Float, Long))): mutable.HashMap[String, (Long, Long, Long)] = {
      val prevStats = t1.getOrElse(t2._1, (Long.MaxValue, Long.MinValue, 0L))
      val newVals = t2._2
      var newCount = prevStats._3
      if (newVals._1 > 500.0) {
        newCount += 1
      }
      val newMin = if (newVals._2 < prevStats._1) newVals._2 else prevStats._1
      val newMax = if (newVals._2 > prevStats._2) newVals._2 else prevStats._2
      t1 += t2._1 -> (newMin, newMax, newCount)
    }
  }

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println(
        "Usage: StatefulAccumulatorsApp <appname> <checkpointDir>")
      System.exit(1)
    }

    val Seq(appName, checkpointDir) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    val stateAccum = ssc.sparkContext.accumulable(new mutable.HashMap[String, (Long, Long, Long)]())(StockAccum)

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], ((rec \ "LastTradePriceOnly").extract[String].toFloat, (rec \ "Volume").extract[String].toLong)))
      })
      .foreachRDD(rdd => {
        rdd.foreach({ stock =>
          stateAccum += (stock._1, (stock._2._1, stock._2._2))
        })
        for ((sym, stats) <- stateAccum.value) printf("Symbol: %s, Stats: %s\n", sym, stats)
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 41
Source File: L6-7PerPartition.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.nio.charset.StandardCharsets

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.eclipse.paho.client.mqttv3.MqttClient
import org.eclipse.paho.client.mqttv3.MqttMessage
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object MqttSinkAppC {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: MqttSinkApp <appname> <outputBrokerUrl> <topic>")
      System.exit(1)
    }

    val Seq(appName, outputBrokerUrl, topic) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children).map(rec => JObject(JField("Timestamp", query \ "created")).merge(rec))
      })
      .map(rec => {
        implicit val formats = DefaultFormats
        rec.children.map(f => f.extract[String]) mkString ","
      })
      .foreachRDD { rdd =>
        rdd.foreachPartition { par =>
          val client = new MqttClient(outputBrokerUrl, MqttClient.generateClientId(), new MemoryPersistence())
          client.connect()
          par.foreach(rec => client.publish(topic, new MqttMessage(rec.getBytes(StandardCharsets.UTF_8))))
          client.disconnect()
          client.close()
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 42
Source File: L6-14HBase.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.io.Text
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object HBaseSinkApp {

  def main(args: Array[String]) {
    if (args.length != 5) {
      System.err.println(
        "Usage: HBaseSinkApp <appname> <hbaseMaster> <tableName> <columnFamilyName> <columnName>")
      System.exit(1)
    }

    val Seq(appName, hbaseMaster, tableName, columnFamilyName, columnName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10
    val windowSize = 20
    val slideInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat))
      })
      .reduceByKeyAndWindow((x: Float, y: Float) => (x + y), Seconds(windowSize), Seconds(slideInterval))
      .foreachRDD(rdd => {
        val hbaseConf = HBaseConfiguration.create()
        hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
        hbaseConf.set("hbase.master", hbaseMaster)
        val jobConf = new Configuration(hbaseConf)
        jobConf.set("mapreduce.job.outputformat.class", classOf[TableOutputFormat[Text]].getName)
        rdd.map(rec => {
          val put = new Put(rec._1.getBytes)
          put.addColumn(columnFamilyName.getBytes, columnName.getBytes, Bytes.toBytes(rec._2 / (windowSize / batchInterval)))
          (rec._1, put)
        }).saveAsNewAPIHadoopDataset(jobConf)
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 43
Source File: L6-23UpdateState.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object StatefulUpdateStateApp {

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println(
        "Usage: StatefulUpdateStateApp <appname> <checkpointDir>")
      System.exit(1)
    }

    val Seq(appName, checkpointDir) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))
    ssc.checkpoint(checkpointDir)

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], ((rec \ "LastTradePriceOnly").extract[String].toFloat, (rec \ "Volume").extract[String].toLong)))
      })
      .updateStateByKey(updateState)
      .print()

    def updateState(values: Seq[(Float, Long)], state: Option[(Long, Long, Long)]): Option[(Long, Long, Long)] = {
      val volumes = values.map(s => s._2)
      val localMin = volumes.min
      val localMax = volumes.max
      val localCount500 = values.map(s => s._1).count(price => price > 500)
      val globalValues = state.getOrElse((Long.MaxValue, Long.MinValue, 0L)).asInstanceOf[(Long, Long, Long)]
      val newMin = if (localMin < globalValues._1) localMin else globalValues._1
      val newMax = if (localMax > globalValues._2) localMax else globalValues._2
      val newCount500 = globalValues._3 + localCount500
      return Some(newMin, newMax, newCount500)
    }

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 44
Source File: L6-26Redis.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.JavaConversions.mutableMapAsJavaMap
import scala.collection.mutable

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

import redis.clients.jedis.Jedis

object StatefulRedisApp {

  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: StatefulRedisApp <appname> <checkpointDir> <hostname>")
      System.exit(1)
    }

    val Seq(appName, checkpointDir, hostname) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], ((rec \ "LastTradePriceOnly").extract[String].toFloat, (rec \ "Volume").extract[String].toLong)))
      })
      .foreachRDD(rdd => {
        rdd.foreachPartition({ part =>
          val jedis = new Jedis(hostname)
          part.foreach(f => {
            val prev = jedis.hmget(f._1, "min", "max", "count")
            if (prev(0) == null) {
              jedis.hmset(f._1, mutable.HashMap("min" -> Long.MaxValue.toString, "max" -> Long.MinValue.toString, "count" -> 0.toString))
            } else {
              val prevLong = prev.toList.map(v => v.toLong)
              var newCount = prevLong(2)
              val newPrice = f._2._1
              val newVolume = f._2._2
              if (newPrice > 500.0) {
                newCount += 1
              }
              val newMin = if (newVolume < prevLong(0)) newVolume else prevLong(0)
              val newMax = if (newVolume > prevLong(1)) newVolume else prevLong(1)
              jedis.hmset(f._1, mutable.HashMap("min" -> newMin.toString, "max" -> newMax.toString, "count" -> newCount.toString))
            }
          })
          jedis.close()
        })

        val jedis = new Jedis(hostname)
        jedis.scan(0).getResult.foreach(sym => println("Symbol: %s, Stats: %s".format(sym, jedis.hmget(sym, "min", "max", "count").toString)))
        jedis.close()
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 45
Source File: GeneratorThread.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.benchmark.generator.threads

import java.util.{Date, UUID}

import akka.event.slf4j.SLF4JLogging
import com.stratio.benchmark.generator.runners.StoppedThreads
import com.stratio.kafka.benchmark.generator.kafka.KafkaProducer
import com.stratio.models.benchmark.generator.models.{RawModel, RawModelCommonData}
import kafka.producer.Producer
import org.json4s.native.Serialization._
import org.json4s.{DefaultFormats, Formats}

class GeneratorThread(producer: Producer[String,String], timeout: Long, stoppedThreads: StoppedThreads, topic: String)
  extends Runnable with SLF4JLogging with RawModelCommonData {

  implicit val formats: Formats = DefaultFormats

  var numberOfEvents = 0

  override def run: Unit = {
    generateRaw(new Date().getTime)
    producer.close()

    stoppedThreads.incrementNumberOfEvents(numberOfEvents)
    stoppedThreads.incrementNumberOfThreads
  }

  private def generateRaw(startTimeInMillis: Long): Unit = {
    while(((startTimeInMillis + timeout) - new Date().getTime) > 0) {
      val id = UUID.randomUUID.toString
      val timestamp = RawModel.generateTimestamp
      val clientId = RawModel.generateRandomInt(RawModel.Range_client_id._1, RawModel.Range_client_id._2)
      val latitude = clientIdGeo.get(clientId).get._1
      val longitude = clientIdGeo.get(clientId).get._2
      val paymentMethod = RawModel.generatePaymentMethod()
      val creditCard = clientIdCreditCard.get(clientId).get
      val shoppingCenter = RawModel.generateShoppingCenter()
      val employee = RawModel.generateRandomInt(RawModel.Range_employee._1, RawModel.Range_employee._2)

      val rawModel = new RawModel(
        id,
        timestamp,
        clientId,
        latitude,
        longitude,
        paymentMethod,
        creditCard,
        shoppingCenter,
        employee)

      KafkaProducer.send(producer, topic, write(rawModel))
      numberOfEvents = numberOfEvents + 1
    }
  }
} 
Example 46
Source File: JsoneyStringTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.sdk.properties

import org.json4s.jackson.JsonMethods._
import org.json4s.jackson.Serialization.write
import org.json4s.{DefaultFormats, _}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{Matchers, WordSpecLike}

@RunWith(classOf[JUnitRunner])
class JsoneyStringTest extends WordSpecLike with Matchers {

  "A JsoneyString" should {
    "have toString equivalent to its internal string" in {
      assertResult("foo")(new JsoneyString("foo").toString)
    }

    "be deserialized if its JSON" in {
      implicit val json4sJacksonFormats = DefaultFormats + new JsoneyStringSerializer()
      val result = parse( """{ "foo": "bar" }""").extract[JsoneyString]
      assertResult(new JsoneyString( """{"foo":"bar"}"""))(result)
    }

    "be deserialized if it's a String" in {
      implicit val json4sJacksonFormats = DefaultFormats + new JsoneyStringSerializer()
      val result = parse("\"foo\"").extract[JsoneyString]
      assertResult(new JsoneyString("foo"))(result)
    }

    "be deserialized if it's an Int" in {
      implicit val json4sJacksonFormats = DefaultFormats + new JsoneyStringSerializer()
      val result = parse("1").extract[JsoneyString]
      assertResult(new JsoneyString("1"))(result)
    }

    "be serialized as JSON" in {
      implicit val json4sJacksonFormats = DefaultFormats + new JsoneyStringSerializer()

      var result = write(new JsoneyString("foo"))
      assertResult("\"foo\"")(result)

      result = write(new JsoneyString("{\"foo\":\"bar\"}"))
      assertResult("\"{\\\"foo\\\":\\\"bar\\\"}\"")(result)
    }

    "be deserialized if it's an JBool" in {
      implicit val json4sJacksonFormats = DefaultFormats + new JsoneyStringSerializer()
      val result = parse("true").extract[JsoneyString]
      assertResult(new JsoneyString("true"))(result)
    }

    "have toSeq equivalent to its internal string" in {
      assertResult(Seq("o"))(new JsoneyString("foo").toSeq)
    }
  }
} 
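The test above builds its Formats by adding a project-specific serializer to DefaultFormats. The same composition works with any json4s CustomSerializer; the sketch below uses a hypothetical Tag wrapper type (Tag and TagSerializer are illustrative and not part of Sparta).

import org.json4s._
import org.json4s.native.JsonMethods.parse
import org.json4s.native.Serialization.write

object CustomSerializerSketch {

  // Hypothetical wrapper type, used only to illustrate the DefaultFormats + serializer pattern.
  case class Tag(value: String)

  // Renders a Tag as a bare JSON string and reads one back from a JString.
  object TagSerializer extends CustomSerializer[Tag](_ => (
    { case JString(s) => Tag(s) },
    { case Tag(s) => JString(s) }))

  def main(args: Array[String]) {
    implicit val formats = DefaultFormats + TagSerializer

    assert(write(Tag("foo")) == "\"foo\"")
    assert(parse("\"foo\"").extract[Tag] == Tag("foo"))
    println("round trip ok")
  }
} 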
Example 47
Source File: KafkaBase.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.input.kafka

import java.io.{Serializable => JSerializable}

import com.stratio.sparta.plugin.input.kafka.models.TopicsModel
import com.stratio.sparta.sdk.properties.JsoneyStringSerializer
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.json4s.jackson.Serialization._
import org.json4s.{DefaultFormats, Formats}

import scala.util.Try

trait KafkaBase {

  val DefaultHost = "localhost"
  val DefaultBrokerPort = "9092"
  val DefaultZkPort = "2181"
  val DefaultZookeeperPath = ""

  val properties: Map[String, JSerializable]

  

  def extractTopics: Set[String] =
    if (properties.contains("topics"))
      getTopicsPartitions.topics.map(topicPartitionModel => topicPartitionModel.topic).toSet
    else throw new IllegalStateException(s"Invalid configuration, topics must be declared in direct approach")

  private def getTopicsPartitions: TopicsModel = {
    implicit val json4sJacksonFormats: Formats = DefaultFormats + new JsoneyStringSerializer()
    val topicsModel = read[TopicsModel](
      s"""{"topics": ${properties.get("topics").fold("[]") { values => values.toString }}}""""
    )

    if (topicsModel.topics.isEmpty)
      throw new IllegalStateException(s"topics is mandatory")
    else topicsModel
  }
} 
Example 48
Source File: CacheStatusSerDe.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.oap.utils

import org.json4s.{DefaultFormats, StringInput}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._

import org.apache.spark.sql.execution.datasources.oap.filecache.FiberCacheStatus
import org.apache.spark.sql.execution.datasources.oap.io.{OapDataFileMeta, OapDataFileMetaV1}
import org.apache.spark.util.collection.{BitSet, OapBitSet}


private[oap] object CacheStatusSerDe extends SerDe[String, Seq[FiberCacheStatus]] {
  import org.json4s.jackson.JsonMethods._

  override def serialize(statusRawDataArray: Seq[FiberCacheStatus]): String = {
    val statusJArray = JArray(statusRawDataArray.map(statusRawDataToJson).toList)
    compact(render("statusRawDataArray" -> statusJArray))
  }

  private implicit val format = DefaultFormats

  override def deserialize(json: String): Seq[FiberCacheStatus] = {
    (parse(StringInput(json), false) \ "statusRawDataArray")
      .extract[List[JValue]].map(statusRawDataFromJson)
  }

  private[oap] def bitSetToJson(bitSet: OapBitSet): JValue = {
    val words: Array[Long] = bitSet.toLongArray()
    val bitSetJson = JArray(words.map(word => ("word" -> word): JValue).toList)
    ("bitSet" -> bitSetJson)
  }

  private[oap] def bitSetFromJson(json: JValue): OapBitSet = {
    val words: Array[Long] = (json \ "bitSet").extract[List[JValue]].map { word =>
      (word \ "word").extract[Long]
    }.toArray[Long]
    new OapBitSet(words)
  }

  // we only transfer 4 items in DataFileMeta to the driver; they are rowCountInEachGroup,
  // rowCountInLastGroup, groupCount, fieldCount respectively
  private[oap] def dataFileMetaToJson(dataFileMeta: OapDataFileMeta): JValue = {
    ("rowCountInEachGroup" -> dataFileMeta.rowCountInEachGroup) ~
      ("rowCountInLastGroup" -> dataFileMeta.rowCountInLastGroup) ~
      ("groupCount" -> dataFileMeta.groupCount) ~
      ("fieldCount" -> dataFileMeta.fieldCount)
  }

  private[oap] def dataFileMetaFromJson(json: JValue): OapDataFileMeta = {
    val rowCountInEachGroup = (json \ "rowCountInEachGroup").extract[Int]
    val rowCountInLastGroup = (json \ "rowCountInLastGroup").extract[Int]
    val groupCount = (json \ "groupCount").extract[Int]
    val fieldCount = (json \ "fieldCount").extract[Int]
    new OapDataFileMetaV1(
      rowCountInEachGroup = rowCountInEachGroup,
      rowCountInLastGroup = rowCountInLastGroup,
      groupCount = groupCount,
      fieldCount = fieldCount)
  }

  private[oap] def statusRawDataToJson(statusRawData: FiberCacheStatus): JValue = {
    ("fiberFilePath" -> statusRawData.file) ~
      ("bitSetJValue" -> bitSetToJson(statusRawData.bitmask)) ~
      ("groupCount" -> statusRawData.groupCount) ~
      ("fieldCount" -> statusRawData.fieldCount)
  }

  private[oap] def statusRawDataFromJson(json: JValue): FiberCacheStatus = {
    val path = (json \ "fiberFilePath").extract[String]
    val bitSet = bitSetFromJson(json \ "bitSetJValue")
    val groupCount = (json \ "groupCount").extract[Int]
    val fieldCount = (json \ "fieldCount").extract[Int]
    FiberCacheStatus(path, bitSet, groupCount, fieldCount)
  }
} 
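
The serializer above combines two json4s idioms: building JSON with the JsonDSL pair syntax and reading it back with extract under DefaultFormats. The following standalone round trip mirrors just the bit-set encoding from bitSetToJson/bitSetFromJson, with no OAP dependencies:

import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

object BitSetJsonRoundTrip extends App {
  implicit val formats: Formats = DefaultFormats

  // Encode, mirroring bitSetToJson: an array of {"word": <long>} objects.
  val words = Array(3L, 10L)
  val bitSetJson = JArray(words.map(word => ("word" -> word): JValue).toList)
  val text = compact(render("bitSet" -> bitSetJson)) // {"bitSet":[{"word":3},{"word":10}]}

  // Decode, mirroring bitSetFromJson.
  val decoded = (parse(text) \ "bitSet").extract[List[JValue]].map(w => (w \ "word").extract[Long])
  println(decoded) // List(3, 10)
}
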
Example 49
Source File: OrderBook.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model

import org.json4s.JsonAST.JObject
import org.json4s.native.JsonMethods._
import org.json4s.{DefaultFormats, JValue}
import stellar.sdk.KeyPair
import stellar.sdk.model.response.ResponseParser

case class OrderBook(selling: Asset, buying: Asset, bids: Seq[Order], asks: Seq[Order])

case class Order(price: Price, quantity: Long)

object OrderBookDeserializer extends ResponseParser[OrderBook]({ o: JObject =>
  implicit val formats = DefaultFormats

  def asset(obj: JValue) = {
    def assetCode = (obj \ s"asset_code").extract[String]

    def assetIssuer = KeyPair.fromAccountId((obj \ s"asset_issuer").extract[String])

    (obj \ s"asset_type").extract[String] match {
      case "native" => NativeAsset
      case "credit_alphanum4" => IssuedAsset4(assetCode, assetIssuer)
      case "credit_alphanum12" => IssuedAsset12(assetCode, assetIssuer)
      case t => throw new RuntimeException(s"Unrecognised asset type '$t'")
    }
  }

  def orders(obj: JValue) = {
    obj.children.map(c =>
      Order(
        price = Price(
          n = (c \ "price_r" \ "n").extract[Int],
          d = (c \ "price_r" \ "d").extract[Int]
        ),
        quantity = Amount.toBaseUnits((c \ "amount").extract[String]).get
      ))
  }

  try {
    OrderBook(
      selling = asset(o \ "base"),
      buying = asset(o \ "counter"),
      bids = orders(o \ "bids"),
      asks = orders(o \ "asks")
    )
  } catch {
    case t: Throwable => throw new RuntimeException(pretty(render(o)), t)
  }
}) 
Example 50
Source File: Transacted.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.op

import java.time.ZonedDateTime

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk.model.response.ResponseParser


case class Transacted[+O <: Operation](id: Long,
                                      txnHash: String,
                                      createdAt: ZonedDateTime,
                                      operation: O)

object TransactedOperationDeserializer extends ResponseParser[Transacted[Operation]]({ o: JObject =>
  implicit val formats = DefaultFormats + OperationDeserializer

  def date(key: String) = ZonedDateTime.parse((o \ key).extract[String])

  Transacted(
    id = (o \ "id").extract[String].toLong,
    txnHash = (o \ "transaction_hash").extract[String],
    createdAt = date("created_at"),
    operation = o.extract[Operation])
}) 
Example 51
Source File: LedgerResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import java.time.ZonedDateTime

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk.model.{Amount, NativeAmount}

case class LedgerResponse(id: String, hash: String, previousHash: Option[String], sequence: Long, successTransactionCount: Int,
                          failureTransactionCount: Int, operationCount: Int, closedAt: ZonedDateTime,
                          totalCoins: NativeAmount, feePool: NativeAmount, baseFee: NativeAmount, baseReserve: NativeAmount,
                          maxTxSetSize: Int) {

  def transactionCount: Int = successTransactionCount + failureTransactionCount

}

object LedgerRespDeserializer extends ResponseParser[LedgerResponse]({ o: JObject =>
  implicit val formats = DefaultFormats

  LedgerResponse(
    id = (o \ "id").extract[String],
    hash = (o \ "hash").extract[String],
    previousHash = (o \ "prev_hash").extractOpt[String],
    sequence = (o \ "sequence").extract[Long],
    successTransactionCount = (o \ "successful_transaction_count").extract[Int],
    failureTransactionCount = (o \ "failed_transaction_count").extract[Int],
    operationCount = (o \ "operation_count").extract[Int],
    closedAt = ZonedDateTime.parse((o \ "closed_at").extract[String]),
    totalCoins = Amount.toBaseUnits((o \ "total_coins").extract[String]).map(NativeAmount.apply).get,
    feePool = Amount.toBaseUnits((o \ "fee_pool").extract[String]).map(NativeAmount.apply).get,
    baseFee = NativeAmount((o \ "base_fee").extractOpt[Long].getOrElse((o \ "base_fee_in_stroops").extract[Long])),
    baseReserve = {
      val old: Option[Long] = (o \ "base_reserve").extractOpt[String].map(_.toDouble).map(Amount.toBaseUnits).map(_.get)
      NativeAmount(old.getOrElse((o \ "base_reserve_in_stroops").extract[Long]))
    },
    maxTxSetSize = (o \ "max_tx_set_size").extract[Int]
  )
}) 
Example 52
Source File: OfferResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import java.time.ZonedDateTime

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk._
import stellar.sdk.model._

case class OfferResponse(id: Long, seller: PublicKeyOps, selling: Amount, buying: Asset, price: Price,
                         lastModifiedLedger: Long, lastModifiedTime: ZonedDateTime) {

  override def toString = {
    s"Offer $id: ${seller.accountId} selling $selling, buying $buying @ rate $price"
  }
}

object OfferRespDeserializer extends ResponseParser[OfferResponse]({ o: JObject =>
  implicit val formats = DefaultFormats
  val id = (o \ "id").extract[String].toLong

  def account(accountKey: String = "account") = KeyPair.fromAccountId((o \ accountKey).extract[String])

  def asset(prefix: String = "", issuerKey: String = "asset_issuer") = {
    def assetCode = (o \ prefix \ "asset_code").extract[String]

    def assetIssuer = KeyPair.fromAccountId((o \ prefix \ issuerKey).extract[String])

    (o \ prefix \ "asset_type").extract[String] match {
      case "native" => NativeAsset
      case "credit_alphanum4" => IssuedAsset4(assetCode, assetIssuer)
      case "credit_alphanum12" => IssuedAsset12(assetCode, assetIssuer)
      case t => throw new RuntimeException(s"Unrecognised asset type '$t'")
    }
  }

  def doubleFromString(key: String) = (o \ key).extract[String].toDouble

  def amount(prefix: String = "") = {
    val units = Amount.toBaseUnits(doubleFromString("amount")).get
    asset(prefix) match {
      case nna: NonNativeAsset => IssuedAmount(units, nna)
      case NativeAsset => NativeAmount(units)
    }
  }

  def price = {
    val priceObj = o \ "price_r"
    Price(
      (priceObj \ "n").extract[Int],
      (priceObj \ "d").extract[Int]
    )
  }

  def lastModifiedLedger = (o \ "last_modified_ledger").extract[Long]

  def lastModifiedTime = ZonedDateTime.parse((o \ "last_modified_time").extract[String])

  OfferResponse(id, account("seller"), amount("selling"), asset("buying"), price, lastModifiedLedger, lastModifiedTime)
}) 
Example 53
Source File: FederationResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk.model._
import stellar.sdk.util.ByteArrays.{hexToBytes, trimmedByteArray}
import stellar.sdk.{KeyPair, PublicKey}

case class FederationResponse(address: String,
                              account: PublicKey,
                              memo: Memo = NoMemo)

object FederationResponseDeserialiser extends ResponseParser[FederationResponse]({ o: JObject =>
  implicit val formats = DefaultFormats

//  println(JsonMethods.pretty(JsonMethods.render(o)))

  FederationResponse(
    // reference server erroneously fails to set `stellar_address` for forward lookups
    address = (o \ "stellar_address").extractOpt[String].orNull,
    // reference server erroneously fails to set `account_id` for reverse lookups
    account = (o \ "account_id").extractOpt[String].map(KeyPair.fromAccountId).orNull,
    memo = (o \ "memo_type").extractOpt[String] match {
      case Some("id") => MemoId((o \ "memo").extract[String].toLong)
      case Some("text") => MemoText((o \ "memo").extract[String])
      case Some("hash") => MemoHash(trimmedByteArray(hexToBytes((o \ "memo").extract[String])))
      case _ => NoMemo
    }
  )
}) 
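
The memo handling above leans on extractOpt to tolerate fields that may be absent. A minimal, standalone illustration of that idiom follows; the JSON below is invented for the example and is not a real federation response.

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object ExtractOptSketch extends App {
  implicit val formats: Formats = DefaultFormats

  val withMemo    = parse("""{"memo_type": "text", "memo": "hello"}""")
  val withoutMemo = parse("""{"stellar_address": "alice*example.com"}""")

  def describe(o: JValue): String = (o \ "memo_type").extractOpt[String] match {
    case Some("text") => s"text memo: ${(o \ "memo").extract[String]}"
    case Some(other)  => s"unhandled memo type: $other"
    case None         => "no memo"
  }

  println(describe(withMemo))    // text memo: hello
  println(describe(withoutMemo)) // no memo
}
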
Example 54
Source File: TransactionPostResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk.Network
import stellar.sdk.model.ledger.{LedgerEntryChange, LedgerEntryChanges, TransactionLedgerEntries}
import stellar.sdk.model.result._
import stellar.sdk.model.{NativeAmount, SignedTransaction}

sealed abstract class TransactionPostResponse(envelopeXDR: String, resultXDR: String) {
  val isSuccess: Boolean

  def transaction(implicit network: Network): SignedTransaction = SignedTransaction.decodeXDR(envelopeXDR)

  def feeCharged: NativeAmount
}

// Note: the original source also defines a TransactionApproved case class at this point (it is
// referenced by the deserializer below); that definition was elided when this snippet was
// extracted.

case class TransactionRejected(status: Int, detail: String,
                               resultCode: String, opResultCodes: Seq[String],
                               envelopeXDR: String, resultXDR: String)
  extends TransactionPostResponse(envelopeXDR, resultXDR) {

  override val isSuccess: Boolean = false

  // -- unroll nested XDR deserialised object into this object for convenience
  lazy val result: TransactionNotSuccessful = TransactionResult.decodeXDR(resultXDR).asInstanceOf[TransactionNotSuccessful]

  def feeCharged: NativeAmount = result.feeCharged
}



object TransactionPostResponseDeserializer extends ResponseParser[TransactionPostResponse]({
  o: JObject =>
    implicit val formats = DefaultFormats

    (o \ "type").extractOpt[String] match {

      case Some("https://stellar.org/horizon-errors/transaction_failed") =>
        TransactionRejected(
          status = (o \ "status").extract[Int],
          detail = (o \ "detail").extract[String],
          resultCode = (o \ "extras" \ "result_codes" \ "transaction").extract[String],
          opResultCodes = (o \ "extras" \ "result_codes" \ "operations").extract[Seq[String]],
          resultXDR = (o \ "extras" \ "result_xdr").extract[String],
          envelopeXDR = (o \ "extras" \ "envelope_xdr").extract[String]
        )

      case _ =>
        TransactionApproved(
          hash = (o \ "hash").extract[String],
          ledger = (o \ "ledger").extract[Long],
          envelopeXDR = (o \ "envelope_xdr").extract[String],
          resultXDR = (o \ "result_xdr").extract[String],
          resultMetaXDR = (o \ "result_meta_xdr").extract[String]
        )
    }
}) 
Example 55
Source File: FeeStatsResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import org.json4s.native.JsonMethods
import org.json4s.{DefaultFormats, JObject}
import stellar.sdk.model.NativeAmount

case class FeeStatsResponse(lastLedger: Long,
                            lastLedgerBaseFee: NativeAmount,
                            ledgerCapacityUsage: Double,
                            maxFees: FeeStats,
                            chargedFees: FeeStats) {

  @deprecated("Use `chargedFees.min` instead.", "v0.11.0")
  def minAcceptedFee: NativeAmount = chargedFees.min

  @deprecated("Use `chargedFees.mode` instead.", "v0.11.0")
  def modeAcceptedFee: NativeAmount = chargedFees.mode

  @deprecated("Use `chargedFees.percentiles` instead.", "v0.11.0")
  def acceptedFeePercentiles: Map[Int, NativeAmount] = chargedFees.percentiles

}

case class FeeStats(min: NativeAmount,
                    mode: NativeAmount,
                    max: NativeAmount,
                    percentiles: Map[Int, NativeAmount])

object FeeStatsRespDeserializer extends ResponseParser[FeeStatsResponse]({ o: JObject =>
  implicit val formats = DefaultFormats + FeeStatsDeserializer

  def amount(field: String): NativeAmount = NativeAmount((o \ field).extract[String].toLong)

  val lastLedger = (o \ "last_ledger").extract[String].toLong
  val lastLedgerBaseFee = amount("last_ledger_base_fee")
  val ledgerCapacityUsage = (o \ "ledger_capacity_usage").extract[String].toDouble
  val maxFees = (o \ "max_fee").extract[FeeStats]
  val chargedFees = (o \ "fee_charged").extract[FeeStats]

  FeeStatsResponse(lastLedger, lastLedgerBaseFee, ledgerCapacityUsage, maxFees, chargedFees)
})

object FeeStatsDeserializer extends ResponseParser[FeeStats]({ o: JObject =>
  implicit val formats = DefaultFormats

  def amount(field: String): NativeAmount = NativeAmount((o \ field).extract[String].toLong)

  FeeStats(
    min = amount("min"),
    mode = amount("mode"),
    max = amount("max"),
    percentiles = Map(
      10 -> amount("p10"),
      20 -> amount("p20"),
      30 -> amount("p30"),
      40 -> amount("p40"),
      50 -> amount("p50"),
      60 -> amount("p60"),
      70 -> amount("p70"),
      80 -> amount("p80"),
      90 -> amount("p90"),
      95 -> amount("p95"),
      99 -> amount("p99")
    ))
}) 
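
The line `DefaultFormats + FeeStatsDeserializer` works because json4s formats can be extended with extra serializers. The sketch below shows the same composition with a plain json4s CustomSerializer rather than the SDK's ResponseParser; the Fee class and the string-encoded field are assumptions made for the example, echoing how Horizon returns numbers as JSON strings above.

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

// Hypothetical value class for the example.
case class Fee(stroops: Long)

// Reads a fee encoded as a JSON string, mirroring the .extract[String].toLong calls above.
object FeeSerializer extends CustomSerializer[Fee](_ => (
  { case JString(s) => Fee(s.toLong) },
  { case Fee(stroops) => JString(stroops.toString) }
))

object FeeFormatsSketch extends App {
  implicit val formats: Formats = DefaultFormats + FeeSerializer

  println((parse("""{"min": "100"}""") \ "min").extract[Fee]) // Fee(100)
}
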
Example 56
Source File: AccountResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import java.nio.charset.StandardCharsets.UTF_8

import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonAST.{JArray, JObject}
import stellar.sdk._
import stellar.sdk.model.Amount.toBaseUnits
import stellar.sdk.model._
import stellar.sdk.util.ByteArrays

case class AccountResponse(id: PublicKey,
                           lastSequence: Long,
                           subEntryCount: Int,
                           thresholds: Thresholds,
                           authRequired: Boolean,
                           authRevocable: Boolean,
                           balances: List[Balance],
                           signers: List[Signer],
                           data: Map[String, Array[Byte]]) {

  def toAccount: Account = Account(AccountId(id.publicKey), lastSequence + 1)

  def decodedData: Map[String, String] = data.map { case (k, v) => k -> new String(v, UTF_8) }
}

object AccountRespDeserializer extends ResponseParser[AccountResponse]({ o: JObject =>
  implicit val formats: Formats = DefaultFormats
  val id = KeyPair.fromAccountId((o \ "id").extract[String])
  val seq = (o \ "sequence").extract[String].toLong
  val subEntryCount = (o \ "subentry_count").extract[Int]
  val lowThreshold = (o \ "thresholds" \ "low_threshold").extract[Int]
  val mediumThreshold = (o \ "thresholds" \ "med_threshold").extract[Int]
  val highThreshold = (o \ "thresholds" \ "high_threshold").extract[Int]
  val authRequired = (o \ "flags" \ "auth_required").extract[Boolean]
  val authRevocable = (o \ "flags" \ "auth_revocable").extract[Boolean]
  val JArray(jsBalances) = o \ "balances"
  val balances = jsBalances.map {
    case balObj: JObject =>
      val units = toBaseUnits((balObj \ "balance").extract[String].toDouble).get
      val amount = (balObj \ "asset_type").extract[String] match {
        case "credit_alphanum4" =>
          Amount(units, IssuedAsset4(
            code = (balObj \ "asset_code").extract[String],
            issuer = KeyPair.fromAccountId((balObj \ "asset_issuer").extract[String])
          ))
        case "credit_alphanum12" =>
          Amount(units, IssuedAsset12(
            code = (balObj \ "asset_code").extract[String],
            issuer = KeyPair.fromAccountId((balObj \ "asset_issuer").extract[String])
          ))
        case "native" => NativeAmount(units)
        case t => throw new RuntimeException(s"Unrecognised asset type: $t")
      }
      val limit = (balObj \ "limit").extractOpt[String].map(BigDecimal(_)).map(toBaseUnits).map(_.get)
      val buyingLiabilities = toBaseUnits(BigDecimal((balObj \ "buying_liabilities").extract[String])).get
      val sellingLiabilities = toBaseUnits(BigDecimal((balObj \ "selling_liabilities").extract[String])).get
      val authorised = (balObj \ "is_authorized").extractOpt[Boolean].getOrElse(false)
      val authorisedToMaintainLiabilities = (balObj \ "is_authorized_to_maintain_liabilities")
        .extractOpt[Boolean].getOrElse(false)

      Balance(amount, limit, buyingLiabilities, sellingLiabilities, authorised, authorisedToMaintainLiabilities)
    case _ => throw new RuntimeException(s"Expected js object at 'balances'")
  }
  val JArray(jsSigners) = o \ "signers"
  val signers = jsSigners.map {
    case signerObj: JObject =>
      val key = StrKey.decodeFromString((signerObj \ "key").extract[String]).asInstanceOf[SignerStrKey]
      val weight = (signerObj \ "weight").extract[Int]
      Signer(key, weight)
    case _ => throw new RuntimeException(s"Expected js object at 'signers'")
  }
  val JObject(dataFields) = o \ "data"
  val data = dataFields.map{ case (k, v) => k -> ByteArrays.base64(v.extract[String]) }.toMap

  AccountResponse(id, seq, subEntryCount, Thresholds(lowThreshold, mediumThreshold, highThreshold),
    authRequired, authRevocable, balances, signers, data)

}) 
Example 57
Source File: NetworkInfo.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject


case class NetworkInfo(horizonVersion: String,
                       coreVersion: String,
                       earliestLedger: Long,
                       latestLedger: Long,
                       passphrase: String,
                       currentProtocolVersion: Int,
                       supportedProtocolVersion: Int)

object NetworkInfoDeserializer extends ResponseParser[NetworkInfo]({ o: JObject =>
  implicit val formats = DefaultFormats

  NetworkInfo(
    horizonVersion = (o \ "horizon_version").extract[String],
    coreVersion = (o \ "core_version").extract[String],
    earliestLedger = (o \ "history_elder_ledger").extract[Long],
    latestLedger = (o \ "history_latest_ledger").extract[Long],
    passphrase = (o \ "network_passphrase").extract[String],
    currentProtocolVersion = (o \ "current_protocol_version").extract[Int],
    supportedProtocolVersion = (o \ "core_supported_protocol_version").extract[Int]
  )
}) 
Example 58
Source File: AssetResponse.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.response

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk._
import stellar.sdk.model.{Amount, IssuedAsset12, IssuedAsset4, NonNativeAsset}

case class AssetResponse(asset: NonNativeAsset, amount: Long, numAccounts: Int, authRequired: Boolean, authRevocable: Boolean)

object AssetRespDeserializer extends ResponseParser[AssetResponse]({ o: JObject =>
  implicit val formats = DefaultFormats
  val asset = {
    val code = (o \ "asset_code").extract[String]
    val issuer = KeyPair.fromAccountId((o \ "asset_issuer").extract[String])
    (o \ "asset_type").extract[String] match {
      case "credit_alphanum4" => IssuedAsset4(code, issuer)
      case "credit_alphanum12" => IssuedAsset12(code, issuer)
      case t => throw new RuntimeException(s"Unrecognised asset type: $t")
    }
  }
  val amount = Amount.toBaseUnits((o \ "amount").extract[String].toDouble).getOrElse(
    throw new RuntimeException(s"Invalid asset amount: ${(o \ "amount").extract[Double]}"))
  val numAccounts = (o \ "num_accounts").extract[Int]
  val authRequired = (o \ "flags" \ "auth_required").extract[Boolean]
  val authRevocable = (o \ "flags" \ "auth_revocable").extract[Boolean]
  AssetResponse(asset, amount, numAccounts, authRequired, authRevocable)
}) 
Example 59
Source File: PaymentPath.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model

import org.json4s.JsonAST.JObject
import org.json4s.{DefaultFormats, Formats, JArray, JValue}
import stellar.sdk.KeyPair
import stellar.sdk.model.AmountParser.{AssetDeserializer, parseAsset}
import stellar.sdk.model.response.ResponseParser

case class PaymentPath(source: Amount, destination: Amount, path: Seq[Asset])

object PaymentPathDeserializer extends ResponseParser[PaymentPath]({
  o: JObject =>
    implicit val formats = DefaultFormats
    implicit val assetDeserializer = AssetDeserializer
    
    PaymentPath(
      source = AmountParser.amount("source_", o),
      destination = AmountParser.amount("destination_", o),
      path = {
        val JArray(values) = (o \ "path").extract[JArray]
        values.map { jv => parseAsset("", jv) }
      }
    )
})

object AmountParser {

  implicit val formats = DefaultFormats

  def parseAsset(prefix: String, o: JValue)(implicit formats: Formats): Asset = {
    val assetType = (o \ s"${prefix}asset_type").extract[String]
    def code = (o \ s"${prefix}asset_code").extract[String]
    def issuer = KeyPair.fromAccountId((o \ s"${prefix}asset_issuer").extract[String])
    assetType match {
      case "native" => NativeAsset
      case "credit_alphanum4" => IssuedAsset4(code, issuer)
      case "credit_alphanum12" => IssuedAsset12(code, issuer)
      case t => throw new RuntimeException(s"Unrecognised ${prefix}asset type: $t")
    }
  }

  def amount(prefix: String, o: JObject)(implicit formats: Formats): Amount = {
    val asset = parseAsset(prefix, o)
    val units = Amount.toBaseUnits((o \ s"${prefix}amount").extract[String]).get
    Amount(units, asset)
  }

  object AssetDeserializer extends ResponseParser[Asset](parseAsset("", _))
} 
Example 60
Source File: Trade.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model

import java.time.ZonedDateTime

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import stellar.sdk.model.response.ResponseParser
import stellar.sdk.{KeyPair, PublicKeyOps}

case class Trade(id: String, ledgerCloseTime: ZonedDateTime, offerId: Long,
                 baseOfferId: Long, counterOfferId: Long,
                 baseAccount: PublicKeyOps, baseAmount: Amount,
                 counterAccount: PublicKeyOps, counterAmount: Amount,
                 baseIsSeller: Boolean)


object TradeDeserializer extends ResponseParser[Trade]({
  o: JObject =>
    implicit val formats = DefaultFormats

    def account(accountKey: String = "account") = KeyPair.fromAccountId((o \ accountKey).extract[String])

    def date(key: String) = ZonedDateTime.parse((o \ key).extract[String])

    def doubleFromString(key: String) = (o \ key).extract[String].toDouble

    def asset(prefix: String = "", issuerKey: String = "asset_issuer") = {
      def assetCode = (o \ s"${prefix}asset_code").extract[String]

      def assetIssuer = KeyPair.fromAccountId((o \ s"$prefix$issuerKey").extract[String])

      (o \ s"${prefix}asset_type").extract[String] match {
        case "native" => NativeAsset
        case "credit_alphanum4" => IssuedAsset4(assetCode, assetIssuer)
        case "credit_alphanum12" => IssuedAsset12(assetCode, assetIssuer)
        case t => throw new RuntimeException(s"Unrecognised asset type '$t'")
      }
    }

    def amount(prefix: String = "") = {
      val units = Amount.toBaseUnits(doubleFromString(s"${prefix}amount")).get
      asset(prefix) match {
        case nna: NonNativeAsset => IssuedAmount(units, nna)
        case NativeAsset => NativeAmount(units)
      }
    }

    Trade(
      id = (o \ "id").extract[String],
      ledgerCloseTime = date("ledger_close_time"),
      offerId = (o \ "offer_id").extract[String].toLong,
      baseOfferId = (o \ "base_offer_id").extract[String].toLong,
      counterOfferId = (o \ "counter_offer_id").extract[String].toLong,
      baseAccount = account("base_account"),
      baseAmount = amount("base_"),
      counterAccount = account("counter_account"),
      counterAmount = amount("counter_"),
      baseIsSeller = (o \ "base_is_seller").extract[Boolean]
    )
}) 
Example 61
Source File: TradeAggregation.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.json4s.JsonAST.JObject
import org.json4s.{DefaultFormats, JValue}
import stellar.sdk.model.response.ResponseParser

import scala.concurrent.duration.Duration

case class TradeAggregation(instant: Instant, tradeCount: Int, baseVolume: Double, counterVolume: Double,
                            average: Double, open: Price, high: Price, low: Price, close: Price)

object TradeAggregationDeserializer extends ResponseParser[TradeAggregation]({ o: JObject =>
  implicit val formats = DefaultFormats

  def price(p: JValue): Price = Price((p \ "N").extract[Int], (p \ "D").extract[Int])

  TradeAggregation(
    instant = Instant.ofEpochMilli((o \ "timestamp").extract[String].toLong),
    tradeCount = (o \ "trade_count").extract[String].toInt,
    baseVolume = (o \ "base_volume").extract[String].toDouble,
    counterVolume = (o \ "counter_volume").extract[String].toDouble,
    average = (o \ "avg").extract[String].toDouble,
    open = price(o \ "open_r"),
    high = price(o \ "high_r"),
    low = price(o \ "low_r"),
    close = price(o \ "close_r"))
})

object TradeAggregation {

  sealed class Resolution(val duration: Duration)

  val OneMinute = new Resolution(Duration.create(1, TimeUnit.MINUTES))
  val FiveMinutes = new Resolution(OneMinute.duration * 5.0)
  val FifteenMinutes = new Resolution(FiveMinutes.duration * 3.0)
  val OneHour = new Resolution(FifteenMinutes.duration * 4.0)
  val OneDay = new Resolution(OneHour.duration * 24.0)
  val OneWeek = new Resolution(OneDay.duration * 7.0)

} 
Example 62
Source File: TransactionHistory.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.model.result

import java.time.ZonedDateTime

import org.json4s.{DefaultFormats, Formats}
import org.json4s.JsonAST.JObject
import stellar.sdk.model._
import stellar.sdk.model.ledger.TransactionLedgerEntries.arr
import stellar.sdk.model.ledger.{LedgerEntryChange, LedgerEntryChanges, TransactionLedgerEntries}
import stellar.sdk.model.response.ResponseParser
import stellar.sdk.util.ByteArrays.base64
import stellar.sdk.{KeyPair, PublicKey}

import scala.util.Try


case class TransactionHistory(hash: String, ledgerId: Long, createdAt: ZonedDateTime, account: PublicKey,
                              sequence: Long, maxFee: NativeAmount, feeCharged: NativeAmount, operationCount: Int,
                              memo: Memo, signatures: Seq[String], envelopeXDR: String, resultXDR: String,
                              resultMetaXDR: String, feeMetaXDR: String, validAfter: Option[ZonedDateTime],
                              validBefore: Option[ZonedDateTime], feeBump: Option[FeeBumpHistory]) {

  lazy val result: TransactionResult = TransactionResult.decodeXDR(resultXDR)

  def ledgerEntries: TransactionLedgerEntries = TransactionLedgerEntries.decodeXDR(resultMetaXDR)
  def feeLedgerEntries: Seq[LedgerEntryChange] = LedgerEntryChanges.decodeXDR(feeMetaXDR)

  @deprecated("Replaced by `feeCharged`", "v0.7.2")
  val feePaid: NativeAmount = feeCharged

}


object TransactionHistoryDeserializer extends {
} with ResponseParser[TransactionHistory]({
  o: JObject =>
    implicit val formats: Formats = DefaultFormats

    val maxFee = NativeAmount((o \ "max_fee").extract[String].toInt)
    val signatures = (o \ "signatures").extract[List[String]]
    val hash = (o \ "hash").extract[String]

    val inner = for {
      hash <- (o \ "inner_transaction" \ "hash").extractOpt[String]
      maxFee <- (o \ "inner_transaction" \ "max_fee").extractOpt[Int].map(NativeAmount(_))
      signatures <- (o \ "inner_transaction" \ "signatures").extractOpt[List[String]]
    } yield (hash, maxFee, signatures)

    TransactionHistory(
      hash = inner.map(_._1).getOrElse(hash),
      ledgerId = (o \ "ledger").extract[Long],
      createdAt = ZonedDateTime.parse((o \ "created_at").extract[String]),
      account = KeyPair.fromAccountId((o \ "source_account").extract[String]),
      sequence = (o \ "source_account_sequence").extract[String].toLong,
      maxFee = inner.map(_._2).getOrElse(maxFee),
      feeCharged = NativeAmount((o \ "fee_charged").extract[String].toInt),
      operationCount = (o \ "operation_count").extract[Int],
      memo = (o \ "memo_type").extract[String] match {
        case "none" => NoMemo
        case "id" => MemoId(BigInt((o \ "memo").extract[String]).toLong)
        case "text" => MemoText((o \ "memo").extractOpt[String].getOrElse(""))
        case "hash" => MemoHash(base64((o \ "memo").extract[String]).toIndexedSeq)
        case "return" => MemoReturnHash(base64((o \ "memo").extract[String]).toIndexedSeq)
      },
      signatures = inner.map(_._3).getOrElse(signatures),
      envelopeXDR = (o \ "envelope_xdr").extract[String],
      resultXDR = (o \ "result_xdr").extract[String],
      resultMetaXDR = (o \ "result_meta_xdr").extract[String],
      feeMetaXDR = (o \ "fee_meta_xdr").extract[String],
      // TODO (jem) - Remove the Try wrappers when https://github.com/stellar/go/issues/1381 is fixed.
      validBefore = Try((o \ "valid_before").extractOpt[String].map(ZonedDateTime.parse)).getOrElse(None),
      validAfter = Try((o \ "valid_after").extractOpt[String].map(ZonedDateTime.parse)).getOrElse(None),
      feeBump = inner.map { _ => FeeBumpHistory(maxFee, hash, signatures) }
    )
}) 
Example 63
Source File: HorizonServerError.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.inet

import okhttp3.HttpUrl
import org.json4s.native.JsonMethods
import org.json4s.{DefaultFormats, Formats, JObject, JValue}

import scala.concurrent.duration.Duration
import scala.util.Try

case class HorizonServerError(uri: HttpUrl, body: JObject)(implicit val formats: Formats) extends Exception(
  s"Server error when communicating with Horizon. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try((body \ "detail").extract[String]).getOrElse(JsonMethods.compact(JsonMethods.render(body)))
  }"
)

case class HorizonEntityNotFound(uri: HttpUrl, body: JValue)(implicit val formats: Formats) extends Exception(
  s"Requested entity was not found in Horizon. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try((body \ "detail").extract[String]).getOrElse(JsonMethods.compact(JsonMethods.render(body)))
  }"
)

case class HorizonRateLimitExceeded(uri: HttpUrl, retryAfter: Duration)(implicit val formats: Formats) extends Exception(
  s"Horizon request rate limit was exceeded. Try again in $retryAfter"
)

case class HorizonBadRequest(uri: HttpUrl, body: String) extends Exception(
  s"Bad request. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try(
      (JsonMethods.parse(body) \ "extras" \ "reason").extract[String]
    ).getOrElse(body)
  }")

case class FailedResponse(cause: String) extends Exception(cause) 
Example 64
Source File: PageParser.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.inet

import java.net.HttpURLConnection.{HTTP_BAD_REQUEST, HTTP_NOT_FOUND}

import okhttp3.HttpUrl
import org.json4s.{DefaultFormats, Formats}
import org.json4s.native.JsonMethods

import scala.reflect.ClassTag

object PageParser {

  def parse[T: ClassTag](url: HttpUrl, responseCode: Int, body: => String)
                        (implicit m: Manifest[T], customFormats: Formats): Page[T] = {

    responseCode match {
      case HTTP_NOT_FOUND => Page(List.empty[T], None)
      case HTTP_BAD_REQUEST => throw HorizonBadRequest(url, body)
      case _ =>
        JsonMethods.parse(body)
          .extract[RawPage]
          .parse[T](url)
    }
  }
} 
Example 65
Source File: PageSpec.scala    From scala-stellar-sdk   with Apache License 2.0 5 votes vote down vote up
package stellar.sdk.inet

import java.net.HttpURLConnection.{HTTP_BAD_REQUEST, HTTP_NOT_FOUND}

import okhttp3.HttpUrl
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.native.JsonMethods
import org.specs2.mutable.Specification
import stellar.sdk.model.response.ResponseParser

class PageSpec extends Specification {

  implicit val formats = DefaultFormats + RawPageDeserializer + HelloDeserializer

  "page parsing" should {
    "return an empty page if no results were found" >> {
      val page = PageParser.parse[String](HttpUrl.parse("http://localhost/"), HTTP_NOT_FOUND, "")
      page.xs must beEmpty
    }

    "throw a bad request exception with the reasons when provided" >> {
      val url = HttpUrl.parse("http://localhost/")
      PageParser.parse[String](url, HTTP_BAD_REQUEST,
        """{
          |  "type": "https://stellar.org/horizon-errors/bad_request",
          |  "title": "Bad Request",
          |  "status": 400,
          |  "detail": "The request you sent was invalid in some way.",
          |  "extras": {
          |    "invalid_field": "cursor",
          |    "reason": "cursor must contain exactly one colon"
          |  }
          |}""".stripMargin) must throwA[HorizonBadRequest].like { e =>
        e.getMessage mustEqual "Bad request. http://localhost/ -> cursor must contain exactly one colon"
      }
    }

    "throw a bad request exception with the full document when the reason is not provided" >> {
      val url = HttpUrl.parse("http://localhost/")
      PageParser.parse[String](url, HTTP_BAD_REQUEST, "random text") must throwA[HorizonBadRequest].like { e =>
        e.getMessage mustEqual "Bad request. http://localhost/ -> random text"
      }
    }

    "parse the member values and provide a link to the next page" >> {
      val doc =
        """
          |{
          |  "_links": {
          |    "self": {
          |      "href": "https://horizon-testnet.stellar.org/hello?cursor=\u0026limit=10\u0026order=asc"
          |    },
          |    "next": {
          |      "href": "https://horizon-testnet.stellar.org/hello?cursor=2045052972961793-0\u0026limit=10\u0026order=asc"
          |    },
          |    "prev": {
          |      "href": "https://horizon-testnet.stellar.org/hello?cursor=940258535411713-0\u0026limit=10\u0026order=desc"
          |    }
          |  },
          |  "_embedded": {
          |    "records": [
          |      {"hello":"world"},
          |      {"hello":"whirled"}
          |    ]
          |  }
          |}
        """.stripMargin

      JsonMethods.parse(doc).extract[RawPage].parse[String](HttpUrl.parse("http://localhost/")) mustEqual Page(
        List("world", "whirled"),
        nextLink = Some(HttpUrl.parse("https://horizon-testnet.stellar.org/hello?cursor=2045052972961793-0&limit=10&order=asc"))
      )
    }
  }

  object HelloDeserializer extends ResponseParser[String]({ o: JObject =>
    implicit val formats = DefaultFormats
    (o \ "hello").extract[String]
  })

} 
Example 66
Source File: KerasModel.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.ml.keras



import breeze.linalg.DenseMatrix
import jigg.util.HDF5Object
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, _}

class KerasModel(model: HDF5Object) {

  private val kerasAttribute = model.checkAndGetAttribute("keras_version")
  private val modelAttribute = model.checkAndGetAttribute("model_config")

  private val weightGroups = model.checkAndGetGroup("model_weights")

  def parseConfigToSeq(config: String): Seq[Map[String, Any]] = {
    val jsonValue = parse(config)
    implicit val formats = DefaultFormats
    val jsonList = jsonValue.extract[Map[String, Any]]
    jsonList("config").asInstanceOf[Seq[Map[String, Any]]]
  }

  private val modelValues = parseConfigToSeq(modelAttribute.getValue(0).toString)

  def getConfigs(x: Map[String, Any]): Map[String, Any] = x("config").asInstanceOf[Map[String,Any]]

  def constructNetwork(values: Seq[Map[String, Any]]): Seq[Functor] = values.map{
    x => {
      val configs = getConfigs(x)
      val functor = x("class_name").toString match {
        case "Activation" =>
          configs("activation").toString match{
            case "relu" => Relu
            case "softmax" => Softmax
            case "sigmoid" => Sigmoid
            case "tanh" => Tanh
          }
        case "Convolution1D" =>
          Convolution1D(configs, weightGroups)
        case "Dense" =>
          Dense(configs, weightGroups)
        case "Embedding" =>
          Embedding(configs, weightGroups)
        case "Flatten" => Flatten
        case _ => Empty
      }
      functor
    }
  }

  private val graph:Seq[Functor] = constructNetwork(modelValues)

  def convert(input: DenseMatrix[Float]): DenseMatrix[Float] = callFunctors(input, graph)

  private def callFunctors(input: DenseMatrix[Float], unprocessed:Seq[Functor]): DenseMatrix[Float] = unprocessed match {
    case functor :: tail =>
      val interOutput = functor.convert(input)
      callFunctors(interOutput, tail)
    case Nil => input
  }
} 
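
The extract[Map[String, Any]] call above is worth isolating: with DefaultFormats in scope, json4s can pull an arbitrary JSON object into a loosely typed Scala map, which is what lets the Keras layer configs be walked generically. A small sketch of just that step, detached from the HDF5 plumbing (the sample config string is invented):

import org.json4s.{DefaultFormats, Formats}
import org.json4s.jackson.JsonMethods.parse

object ConfigMapSketch extends App {
  implicit val formats: Formats = DefaultFormats

  val config = """{"class_name": "Dense", "config": [{"activation": "relu"}, {"units": 64}]}"""
  val map = parse(config).extract[Map[String, Any]]

  println(map("class_name"))                                 // Dense
  println(map("config").asInstanceOf[Seq[Map[String, Any]]]) // List(Map(activation -> relu), Map(units -> 64))
}
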
Example 67
Source File: LookupTable.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.util



import java.io.Reader

import breeze.linalg.DenseMatrix
import org.json4s.{DefaultFormats, _}
import org.json4s.jackson.JsonMethods
import org.json4s.JsonAST.JValue

class LookupTable(rawTable: JValue) {

  implicit private val formats = DefaultFormats
  private val tables = rawTable.extract[Map[String, Map[String, Map[String, String]]]]

  private val key2id = tables("_lookup")("_key2id")
  private val id2key = tables("_lookup")("_id2key")

  // For raw text
  def encodeCharacter(str: String): DenseMatrix[Float] = {
    val strArray = str.map{x =>
      // Note: For skipping unknown character, this encoder returns dummy id.
      key2id.getOrElse(x.toString, "3").toFloat
    }.toArray
    new DenseMatrix[Float](1, str.length, strArray)
  }

  // For list of words
  def encodeWords(words: Array[String]): DenseMatrix[Float] = {
    val wordsArray = words.map{x =>
      // Note: For skipping unknown words, this encoder returns dummy id.
      key2id.getOrElse(x.toString, "3").toFloat
    }
    new DenseMatrix[Float](1, words.length, wordsArray)
  }

  def decode(data: DenseMatrix[Float]): Array[String] =
    data.map{x => id2key.getOrElse(x.toInt.toString, "NONE")}.toArray

  def getId(key: String): Int = key2id.getOrElse(key, "0").toInt
  def getId(key: Char): Int = getId(key.toString)

  def getKey(id: Int): String = id2key.getOrElse(id.toString, "UNKNOWN")
}


object LookupTable {

  // Load from a path on the file system
  def fromFile(path: String) = mkTable(IOUtil.openIn(path))

  // Load from class loader
  def fromResource(path: String) = mkTable(IOUtil.openResourceAsReader(path))

  private def mkTable(input: Reader) = {
    val j = try { JsonMethods.parse(input) } finally { input.close }
    new LookupTable(j)
  }
} 
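
A hedged usage sketch for the table above. It assumes a JSON document with the _lookup/_key2id/_id2key layout that the constructor expects, with every key and id stored as a string as the extractor requires; the sample content is invented and is not jigg's shipped table.

import jigg.util.LookupTable
import org.json4s.jackson.JsonMethods

object LookupTableSketch extends App {
  val json = JsonMethods.parse(
    """{"_lookup": {"_key2id": {"a": "1", "b": "2"}, "_id2key": {"1": "a", "2": "b"}}}""")

  val table = new LookupTable(json)
  println(table.getId("a"))            // 1
  println(table.getKey(2))             // b
  println(table.encodeCharacter("ab")) // 1x2 DenseMatrix: 1.0  2.0
}
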
Example 68
Source File: DataTypeUtils.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.thriftserver.types

import org.json4s.{DefaultFormats, JValue}
import org.json4s.JsonAST.{JObject, JString}
import org.json4s.jackson.JsonMethods.parse


// Note: the enclosing object declaration and its helpers (e.g. the toFieldType method and the
// Field/Schema types used below) were elided when this snippet was extracted; only the object
// header and an implicit formats value, needed by the extract calls, are restored here.
object DataTypeUtils {

  implicit val formats: DefaultFormats.type = DefaultFormats

  def schemaFromSparkJson(sparkJson: String): Schema = {
    val schema = parse(sparkJson) \ "fields"
    val fields = schema.children.map { field =>
      val name = (field \ "name").extract[String]
      val hiveType = toFieldType(field \ "type")
      // TODO: retrieve comment from metadata
      Field(name, hiveType, "")
    }
    Schema(fields.toArray)
  }
} 
Example 69
Source File: RWrappers.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReader


private[r] object RWrappers extends MLReader[Object] {

  override def load(path: String): Object = {
    implicit val format = DefaultFormats
    val rMetadataPath = new Path(path, "rMetadata").toString
    val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
    val rMetadata = parse(rMetadataStr)
    val className = (rMetadata \ "class").extract[String]
    className match {
      case "org.apache.spark.ml.r.NaiveBayesWrapper" => NaiveBayesWrapper.load(path)
      case "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper" =>
        AFTSurvivalRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper" =>
        GeneralizedLinearRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.KMeansWrapper" =>
        KMeansWrapper.load(path)
      case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" =>
        MultilayerPerceptronClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.LDAWrapper" =>
        LDAWrapper.load(path)
      case "org.apache.spark.ml.r.IsotonicRegressionWrapper" =>
        IsotonicRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GaussianMixtureWrapper" =>
        GaussianMixtureWrapper.load(path)
      case "org.apache.spark.ml.r.ALSWrapper" =>
        ALSWrapper.load(path)
      case "org.apache.spark.ml.r.LogisticRegressionWrapper" =>
        LogisticRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestRegressorWrapper" =>
        RandomForestRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestClassifierWrapper" =>
        RandomForestClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.GBTRegressorWrapper" =>
        GBTRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.GBTClassifierWrapper" =>
        GBTClassifierWrapper.load(path)
      case _ =>
        throw new SparkException(s"SparkR read.ml does not support load $className")
    }
  }
} 
Example 70
Source File: JsonVectorConverter.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

private[ml] object JsonVectorConverter {

  
  def toJson(v: Vector): String = {
    v match {
      case SparseVector(size, indices, values) =>
        val jValue = ("type" -> 0) ~
          ("size" -> size) ~
          ("indices" -> indices.toSeq) ~
          ("values" -> values.toSeq)
        compact(render(jValue))
      case DenseVector(values) =>
        val jValue = ("type" -> 1) ~ ("values" -> values.toSeq)
        compact(render(jValue))
    }
  }
} 
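
The converter above only covers serialization. To show the extraction side of the same layout, here is a hedged sketch that reads the emitted JSON back with DefaultFormats; it deliberately uses plain collections instead of Spark's Vector classes so it stays self-contained, and the sample JSON mirrors the sparse branch of toJson.

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object VectorJsonReadSketch extends App {
  implicit val formats: Formats = DefaultFormats

  val json = parse("""{"type":0,"size":4,"indices":[1,3],"values":[0.5,2.0]}""")

  (json \ "type").extract[Int] match {
    case 0 => // sparse layout, as written by toJson above
      val size    = (json \ "size").extract[Int]
      val indices = (json \ "indices").extract[Seq[Int]]
      val values  = (json \ "values").extract[Seq[Double]]
      println(s"sparse: size=$size, indices=${indices.mkString(",")}, values=${values.mkString(",")}")
    case 1 => // dense layout
      println((json \ "values").extract[Seq[Double]])
    case t =>
      throw new RuntimeException(s"Unknown vector type: $t")
  }
}
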
Example 71
Source File: ResultLoader.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut

import java.io.File

import org.apache.commons.lang3.StringUtils
import org.json4s.{DefaultFormats, JValue}
import report.donut.gherkin.processors.JSONProcessor
import report.donut.transformers.cucumber.Feature

import scala.util.Try

trait ResultLoader {
  def load(): Either[String, List[Feature]]
}

object ResultLoader {

  private[donut] class CucumberResultLoader(sourceDir: File) extends ResultLoader {
    override def load(): Either[String, List[Feature]] = {
      if (!sourceDir.exists) {
        return Left(s"Source directory does not exist: $sourceDir")
      }

      val jsonValues = JSONProcessor.loadFrom(sourceDir) match {
        case Left(errors) => return Left(errors)
        case Right(r) => if (r.isEmpty) return Left("No files found of correct format") else Right(r)
      }

      Try(loadCukeFeatures(jsonValues.right.get)).toEither(_.getMessage)
    }

    private[donut] def loadCukeFeatures(json: List[JValue]) = {
      implicit val formats = DefaultFormats
      json.flatMap(f => f.extract[List[Feature]])
    }
  }

  def apply(resultSource: String): ResultLoader = {
    val pattern = "([a-zA-z]{2,}):(.*)".r
    pattern.findFirstMatchIn(resultSource) match {
      case Some(m) => {
        val format = m.group(1)
        val sourcePath = m.group(2)
        if (StringUtils.isBlank(sourcePath)) {
          throw new DonutException("Please provide the source directory path.")
        }
        format match {
          case "cucumber" => new CucumberResultLoader(new File(sourcePath))
          case _ => throw DonutException(s"Unsupported result format: $format")
        }
      }
      case None => new CucumberResultLoader(new File(resultSource)) //Defaults to cucumber result format
    }
  }
} 
Example 72
Source File: GroupByFeatureNameTest.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut.transformers.cucumber

import java.io.File

import org.json4s.DefaultFormats
import org.scalatest.{FlatSpec, Matchers}
import report.donut.DonutTestData
import report.donut.gherkin.model
import report.donut.gherkin.processors.JSONProcessor

import scala.collection.mutable.ListBuffer

class GroupByFeatureNameTest extends FlatSpec with Matchers {

  implicit val formats = DefaultFormats

  // BDD json files for same feature
  private val sample4RootDir = List("src", "test", "resources", "samples-4").mkString("", File.separator, File.separator)
  private val sample4Features = JSONProcessor.loadFrom(new File(sample4RootDir)).right.get.flatMap(f => f.extract[List[Feature]])
  private val sample4DonutFeatures = CucumberTransformer.transform(sample4Features, DonutTestData.statusConfiguration).right.get

  // Unit tests as BDD format json files
  private val sample5RootDir = List("src", "test", "resources", "samples-5").mkString("", File.separator, File.separator)
  private val sample5Features = JSONProcessor.loadFrom(new File(sample5RootDir)).right.get.flatMap(f => f.extract[List[Feature]])

  // BDD and Unit test json files in BDD format, but with different feature names
  private val sample6BDDRootDir = List("src", "test", "resources", "samples-6", "bdd").mkString("", File.separator, File.separator)
  private val sample6BDDFeatures = JSONProcessor.loadFrom(new File(sample6BDDRootDir)).right.get.flatMap(f => f.extract[List[Feature]])
  private val sample6BDDDonutFeatures = CucumberTransformer.transform(sample6BDDFeatures, DonutTestData.statusConfiguration).right.get

  private val sample6UnitTestsRootDir = List("src", "test", "resources", "samples-6", "unit").mkString("", File.separator, File.separator)
  private val sample6UnitTests = JSONProcessor.loadFrom(new File(sample6UnitTestsRootDir)).right.get.flatMap(f => f.extract[List[Feature]])

  behavior of "Cucumber transformer - Group by feature name"

  it should "group donut features by feature name while transforming the list of cucumber features" in {
    sample4DonutFeatures.size shouldBe 1
    sample4DonutFeatures.head.name shouldBe "Add numbers"

    val expectedScenarioNames = List("Add two numbers: 1 and 2", "Only 1 number is provided", "Add four numbers: 1,2,5,10")
    val scenarios = sample4DonutFeatures.head.scenarios
    scenarios.size shouldBe 3
    scenarios.map(s => s.name).sorted shouldBe expectedScenarioNames.sorted
  }

  it should "mapToDonutFeatures if a feature is split across multiple BDD json files" in {
    val donutFeatures = CucumberTransformer.mapToDonutFeatures(sample4Features, new ListBuffer[model.Feature], DonutTestData.statusConfiguration)
    val scenarios = donutFeatures.head.scenarios

    sample4Features.size shouldBe 3
    donutFeatures.size shouldBe 1
    scenarios.size shouldBe 3
    donutFeatures.head.index.toInt shouldBe 10000

    for (o <- sample4Features) {
      o.name shouldBe donutFeatures.head.name
    }
  }

  it should "mapToDonutFeatures if 1 feature is split across few BDD and unit test json files" in {
    val generatedFeatures = CucumberTransformer.mapToDonutFeatures(sample5Features, sample4DonutFeatures, DonutTestData.statusConfiguration)
    val scenarios = generatedFeatures.head.scenarios

    generatedFeatures.size shouldBe 1
    scenarios.size shouldBe 4
    scenarios(3).keyword shouldBe "Unit Test"
    scenarios(3).name should equal(sample5Features.head.elements.head.name)
  }

  it should "mapToDonutFeatures when there are few bdd json files and few unit test json files with a different feature name" in {
    val generatedFeatures = CucumberTransformer.mapToDonutFeatures(sample6UnitTests, sample6BDDDonutFeatures, DonutTestData.statusConfiguration)
    val nonBDDFeature = generatedFeatures(1)
    val nonBDDScenario = nonBDDFeature.scenarios.head
    val bddFeature = generatedFeatures.head

    generatedFeatures.size shouldBe 2
    bddFeature.name shouldBe "Add numbers"
    bddFeature.index shouldBe "10000"
    nonBDDFeature.name shouldBe "Without feature"
    nonBDDFeature.index shouldBe "10001"
    nonBDDScenario.name shouldBe "Add four numbers: 1,2,5,10"
    nonBDDScenario.keyword shouldBe "Unit Test"
  }
} 
Example 73
Source File: ResultLoaderTest.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut

import java.io.File

import org.json4s.DefaultFormats
import org.scalatest.{FlatSpec, Matchers}
import report.donut.ResultLoader.CucumberResultLoader
import report.donut.gherkin.processors.JSONProcessor

class ResultLoaderTest extends FlatSpec with Matchers {

  implicit val formats = DefaultFormats

  behavior of "ResultLoader"

  it should "return a CucumberResultLoader if result source specifies a cucumber format" in {
    val sourcePath = List("cucumber:target", "cucumber-reports").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.isInstanceOf[CucumberResultLoader] shouldBe true
  }

  it should "return a CucumberResultLoader if result source does not specify a format" in {
    val sourcePath = List("target", "cucumber-reports").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.isInstanceOf[CucumberResultLoader] shouldBe true
  }

  it should "throw a DonutException if the result source format is not supported" in {
    val sourcePath = List("junit:target", "failsafe-reports").mkString("", File.separator, "")
    val exception = intercept[DonutException] {
      ResultLoader(sourcePath)
    }
    assert(exception.mgs === "Unsupported result format: junit")
  }

  it should "throw a DonutException if the result source format is provided without the path" in {
    val exception = intercept[DonutException] {
      ResultLoader("cucumber:")
    }
    assert(exception.mgs === "Please provide the source directory path.")
  }

  it should "return a CucumberResultLoader if result source specifies a format and an absolute Windows path" in {
    val sourcePath = "cucumber:C:\\Users\\JDoe\\project\\target\\adapted-reports"
    val loader = ResultLoader(sourcePath)
    loader.isInstanceOf[CucumberResultLoader] shouldBe true
  }

  it should "return a CucumberResultLoader if result source specifies a format and an absolute Unix path" in {
    val sourcePath = "cucumber:/home/jdoe/project/target/cucumber-reports"
    val loader = ResultLoader(sourcePath)
    loader.isInstanceOf[CucumberResultLoader] shouldBe true
  }

  it should "return feature list if valid cucumber JSON files are found" in {
    val sourcePath = List("cucumber:src", "test", "resources", "all-pass").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.load() match {
      case Left(e) => fail(e)
      case Right(r) =>
        r should not be null
        r.size shouldBe 10
    }
  }

  it should "return an error message for a non-existent source directory" in {
    val sourcePath = List("cucumber:src", "test", "resources", "non-existent").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.load() shouldBe Left(s"Source directory does not exist: ${sourcePath.replace("cucumber:", "")}")
  }

  it should "return an error message if JSON files are not found in the source directory" in {
    val sourcePath = List("cucumber:src", "test", "resources", "samples-empty").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.load() shouldBe Left("No files found of correct format")
  }

  it should "return an error if parsing an invalid JSON file" in {
    val sourcePath = List("cucumber:src", "test", "resources", "samples-weirdos").mkString("", File.separator, "")
    val jsonPath = List("src", "test", "resources", "samples-weirdos", "invalid_format.json").mkString("", File.separator, "")
    val loader = ResultLoader(sourcePath)
    loader.load() shouldBe Left("Json could not be parsed for " + jsonPath + ",")
  }

  behavior of "ResultLoader units"

  it should "loadCukeFeatures" in {
    val rootDir = List("src", "test", "resources", "samples-7").mkString("", File.separator, "")
    val features = JSONProcessor.loadFrom(new File(rootDir)).right.get
    val loader = new CucumberResultLoader(new File(rootDir))
    loader.loadCukeFeatures(features).size shouldEqual 1
  }
} 
Example 74
Source File: FilterJsonGeoAndTime.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.wm.eidos.utils.FileUtils
import org.json4s.DefaultFormats
import org.json4s.JArray
import org.json4s.JNothing
import org.json4s.JObject
import org.json4s.JString
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonGeoAndTime extends App {

  class Filter() {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(inputFile: File, jValue: JValue): Unit = {
      println(s"Extracting from ${inputFile.getName}")

      def filterGeo(): Unit = {
        val geoLocations: JValue = (jValue \\ "geolocs" \ "text")

        geoLocations match {
          case JArray(geoLocations: List[_]) => // Type erasure removes the [JString]
            geoLocations.foreach { geoLocation =>
              val text = geoLocation.extract[String]
              val oneLiner = text
                  .replace("\n", "\\n")
                  .replace("\t", "\\t")

              println("\tGeo\t" + oneLiner)
            }
          case JNothing =>
          case _ => throw new RuntimeException(s"Unexpected geoLocations value: $geoLocations")
        }
      }

      def filterTime(): Unit = {
        val timexes: JValue = (jValue \\ "timexes" \ "text")

        timexes match {
          case JArray(timexes: List[_]) => // Type erasure removes the [JString]
            timexes.foreach { timex =>
              val text = timex.extract[String]
              val oneLiner = text
                  .replace("\n", "\\n")
                  .replace("\t", "\\t")

              println("\tTime\t" + oneLiner)
            }
          case JNothing =>
          case _ => throw new RuntimeException(s"Unexpected geoLocations value: $timexes")
        }
      }

      filterGeo()
      filterTime()
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val inputFiles = FileUtils.findFiles(inputDir, extension)
  val filter = new Filter()

  inputFiles.foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)
    filter.filter(inputFile, json)
  }
} 
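
The expression (jValue \\ "geolocs" \ "text") above mixes json4s's two lookup operators: \\ searches the whole tree, while \ only looks at direct children. A tiny, self-contained illustration of that difference (the document is invented):

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object DeepSearchSketch extends App {
  implicit val formats: Formats = DefaultFormats

  val doc = parse("""{"mention": {"geolocs": {"text": "Kenya"}}}""")

  println(doc \ "text")                    // JNothing: "text" is not a direct child of the root
  println((doc \\ "text").extract[String]) // Kenya: \\ finds it anywhere in the tree
}
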
Example 75
Source File: FilterJson.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.wm.eidos.utils.FileUtils
import org.json4s.DefaultFormats
import org.json4s.JArray
import org.json4s.JNothing
import org.json4s.JString
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJson extends App {

  class Filter(val outputDir: String) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(inputFile: File, jValue: JValue): Unit = {
      // println(s"Extracting from ${inputFile.getName}")
      val geoLocations: JValue = (jValue \\ "geolocs" \ "text")

      geoLocations match {
        case JArray(geoLocations: List[_]) => // Type erasure removes the [JString]
          geoLocations.foreach { geoLocation =>
            println(geoLocation.extract[String])
          }
        case JNothing =>
        case _ => throw new RuntimeException(s"Unexpected geoLocations value: $geoLocations")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val outputDir = args(2)
  val inputFiles = FileUtils.findFiles(inputDir, extension)
  val filter = new Filter(outputDir)

  inputFiles.foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)
    filter.filter(inputFile, json)
  }
} 
Example 76
Source File: FilterJsonCanonicalNames.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File
import java.io.PrintWriter

import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.Sinker
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.TsvWriter
import org.json4s.DefaultFormats
import org.json4s.JArray
import org.json4s.JObject
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonCanonicalNames extends App {

  class Filter(tsvWriter: TsvWriter) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    tsvWriter.println("file", "id", "text", "canonicalName")

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue \\ "extractions"

      extractions match {
        case JArray(extractions: List[_]) => // Type erasure removes the [JObject]
          extractions.foreach { extraction =>
            val id = (extraction \ "@id").extract[String]
            val text = (extraction \ "text").extract[String]
            val canonicalName = (extraction \ "canonicalName").extract[String]

            tsvWriter.println(inputFile.getName, id, text, canonicalName)
          }
        case JObject(_) =>
        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val outputFile = args(2)

  new TsvWriter(Sinker.printWriterFromFile(outputFile)).autoClose { tsvWriter =>
    val filter = new Filter(tsvWriter)
    val inputFiles = FileUtils.findFiles(inputDir, extension)

    inputFiles.sortBy(_.getName).foreach { inputFile =>
      val text = FileUtils.getTextFromFile(inputFile)
      val json = JsonMethods.parse(text)

      filter.filter(json, inputFile)
    }
  }
} 
Example 77
Source File: FilterJsonLigatures.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File
import java.io.PrintWriter
import java.util.regex.Pattern

import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.Sinker
import org.clulab.wm.eidos.utils.TsvWriter
import org.json4s.DefaultFormats
import org.json4s.JString
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonLigatures extends App {
  val pattern: Pattern = Pattern.compile("([A-Za-z]+(f([bhkl]|[ft]|[ij])|ij)) ([A-Za-z]+)")

  class Filter(tsvWriter: TsvWriter) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    tsvWriter.println("file", "left", "right")

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue \ "_source" \ "extracted_text"

      extractions match {
        case text: JString =>
          val matcher = pattern.matcher(text.extract[String])

          while (matcher.find)
            tsvWriter.println(inputFile.getName, matcher.group(1), matcher.group(4))
        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val outputFile = args(2)

  new TsvWriter(Sinker.printWriterFromFile(outputFile)).autoClose { tsvWriter =>
    val filter = new Filter(tsvWriter)
    val inputFiles = FileUtils.findFiles(inputDir, extension)

    inputFiles.sortBy(_.getName).foreach { inputFile =>
      val text = FileUtils.getTextFromFile(inputFile)
      val json = JsonMethods.parse(text)

      filter.filter(json, inputFile)
    }
  }
} 
Example 78
Source File: FilterJsonPretty.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.serialization.json.stringify
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileEditor
import org.clulab.wm.eidos.utils.FileUtils
import org.json4s.DefaultFormats
import org.json4s.JObject
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonPretty extends App {

  class Filter(outputDir: String) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue

      extractions match {
        case jObject: JObject =>
          val json = stringify(jObject, pretty = true)
          val path = FileEditor(inputFile).setDir(outputDir).get

          FileUtils.printWriterFromFile(path).autoClose { pw =>
            pw.println(json)
          }

        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val outputDir = args(1)
  val filter = new Filter(outputDir)
  val inputFiles = FileUtils.findFiles(inputDir, "json")

  inputFiles.sortBy(_.getName).foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)

    filter.filter(json, inputFile)
  }
} 
Example 79
Source File: FilterJsonText.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileEditor
import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.StringUtils
import org.json4s.DefaultFormats
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonText extends App {

  class Filter(outputDir: String) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(jValue: JValue, inputFile: File): Unit = {
      val jString: JValue = jValue \ "extracted_text"
      val text: String = jString.extract[String]
      val path = FileEditor(inputFile).setDir(outputDir).setExt("txt").get

      FileUtils.printWriterFromFile(path).autoClose { pw =>
        pw.println(text)
      }
    }
  }

  val inputDir = args(0)
  val outputDir = args(1)
  val filter = new Filter(outputDir)
  val inputFiles = FileUtils.findFiles(inputDir, "json")

  inputFiles.sortBy(_.getName).foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)

    filter.filter(json, inputFile)
  }
} 
Example 80
Source File: SeparateCdrTextFromDirectory.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps.batch

import java.io.File

import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileEditor
import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.StringUtils
import org.clulab.wm.eidos.utils.meta.CluText
import org.json4s.DefaultFormats
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object SeparateCdrTextFromDirectory extends App {

  class Filter(outputDir: String) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(inputFile: File, jValue: JValue): Unit = {
      println(s"Extracting from ${inputFile.getName}")
      val jString: JValue = jValue \ "extracted_text"
      val text: String = jString.extract[String]
      val outputFile = FileEditor(inputFile).setDir(outputDir).setExt("txt").get

      FileUtils.printWriterFromFile(outputFile).autoClose { printWriter =>
        printWriter.print(text)
      }
    }
  }

  val inputDir = args(0)
  val outputDir = args(1)
  val inputFiles = FileUtils.findFiles(inputDir, "json")
  val filter = new Filter(outputDir)

  inputFiles.foreach { inputFile =>
    val json = CluText.getJValue(inputFile)

    filter.filter(inputFile, json)
  }
} 
Example 81
Source File: ExampleGenerator.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import org.clulab.wm.eidos.EidosSystem
import org.clulab.wm.eidos.mentions.EidosTextBoundMention
import org.clulab.wm.eidos.serialization.json.WMJSONSerializer
import org.clulab.wm.eidos.utils.DisplayUtils.{displayMention, displayMentions}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

object ExampleGenerator extends App {

  // Needed for json4s
  implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats


  // creates an extractor engine using the rules and the default actions
  val ieSystem = new EidosSystem()

  val text = "The government uses significantly improved cultivar to boost agricultural production."
  val doc = ieSystem.annotate(text)

  // extract mentions from annotated document
  val mentions = ieSystem.extractMentionsFrom(doc).sortBy(m => (m.sentence, m.getClass.getSimpleName))
  val annotatedDocument = ieSystem.extractFromText(text)
  val entities = annotatedDocument.eidosMentions
      .filter(_.odinMention matches "Entity")
      .sortBy(eidosMention => (eidosMention.odinMention.sentence, eidosMention.odinMention.getClass.getSimpleName))

  // Display the groundings for all entities
  for (e <- entities) {
    println("EidosMention:")
    displayMention(e.odinMention)
    println("Groundings:")
    e.asInstanceOf[EidosTextBoundMention].grounding.values.foreach(g => println(s"\t$g"))
  }

  // Default debug display of the mentions
  displayMentions(mentions, doc)

  // serialize the mentions to a json file
  val mentionsJSON = WMJSONSerializer.jsonAST(mentions)
  println(pretty(render(mentionsJSON)))

//  FileUtils.closing (FileUtils.newPrintWriterFromFile("/Users/bsharp/wmExampleJson_dec7.txt")) { pw =>
//    pw.println(pretty(render(mentionsJSON)))
//  }

  // How to reconstitute the mentions:
  val newMentions = WMJSONSerializer.toMentions(mentionsJSON)

  println("\n*****************************************")
  println("             LOADED MENTIONS")
  println("*****************************************\n")
  println(s"Number of loaded mentions: ${newMentions.length}\n")

  newMentions foreach org.clulab.wm.eidos.utils.DisplayUtils.displayMention

} 
Example 82
Source File: FilterJsonSource.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.serialization.json.stringify
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileEditor
import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.StringUtils
import org.json4s.DefaultFormats
import org.json4s.JObject
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonSource extends App {

  class Filter(outputDir: String) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue \ "_source"

      extractions match {
        case jObject: JObject =>
          val json = stringify(jObject, pretty = true)
          val path = FileEditor(inputFile).setDir(outputDir).get

          FileUtils.printWriterFromFile(path).autoClose { pw =>
            pw.println(json)
          }

        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val outputDir = args(1)
  val filter = new Filter(outputDir)
  val inputFiles = FileUtils.findFiles(inputDir, "json")

  inputFiles.sortBy(_.getName).foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)

    filter.filter(json, inputFile)
  }
} 
Example 83
Source File: FilterJsonExtractions.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File

import org.clulab.wm.eidos.utils.FileUtils
import org.json4s.DefaultFormats
import org.json4s.JArray
import org.json4s.JNothing
import org.json4s.JObject
import org.json4s.JString
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonExtractions extends App {

  class Filter() {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    def filter(inputFile: File, jValue: JValue): Unit = {
      println(s"Extracting from ${inputFile.getName}")
      val extractions: JValue = (jValue \\ "extractions")

      extractions match {
        case JArray(extractions: List[_]) => // Type erasure removes the [JObject]
          extractions.foreach { extraction =>
            val jString = (extraction \ "text")
            val text = jString.extract[String]
            val oneLiner = text
                .replace("\n", "\\n")
                .replace("\t", "\\t")

            println("\t" + oneLiner)
          }
        case JObject(_) =>
        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val inputFiles = FileUtils.findFiles(inputDir, extension)
  val filter = new Filter()

  inputFiles.foreach { inputFile =>
    val text = FileUtils.getTextFromFile(inputFile)
    val json = JsonMethods.parse(text)
    filter.filter(inputFile, json)
  }
} 
Example 84
Source File: WorkspaceLoader.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.console.workspacehandling

import java.nio.file.Path

import better.files.Dsl.mkdirs
import better.files.File
import org.json4s.DefaultFormats
import org.json4s.native.Serialization.{read => jsonRead}

import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success, Try}

// Class header elided in this excerpt; assumed signature restored so the methods below have their context.
abstract class WorkspaceLoader[ProjectType <: Project] {
  def load(path: String): Workspace[ProjectType] = {
    val dirFile = File(path)
    val dirPath = dirFile.path.toAbsolutePath

    if (!dirFile.exists) {
      println(s"creating workspace directory: ${dirFile.path.toString}")
      mkdirs(dirFile)
    }
    new Workspace(ListBuffer.from(loadProjectsFromFs(dirPath)))
  }

  private def loadProjectsFromFs(cpgsPath: Path): LazyList[ProjectType] = {
    cpgsPath.toFile.listFiles
      .filter(_.isDirectory)
      .to(LazyList)
      .flatMap(f => loadProject(f.toPath))
  }

  def loadProject(path: Path): Option[ProjectType] = {
    Try {
      val projectFile = readProjectFile(path)
      createProject(projectFile, path)
    } match {
      case Success(v) => Some(v)
      case Failure(e) =>
        System.err.println(s"Error loading project at $path - skipping: ")
        System.err.println(e)
        None
    }
  }

  def createProject(projectFile: ProjectFile, path: Path): ProjectType

  private val PROJECTFILE_NAME = "project.json"
  implicit val formats: DefaultFormats.type = DefaultFormats

  private def readProjectFile(projectDirName: Path): ProjectFile = {
    // TODO see `writeProjectFile`
    val content = File(projectDirName.resolve(PROJECTFILE_NAME)).contentAsString
    val map = jsonRead[Map[String, String]](content)
    ProjectFile(map("inputPath"), map("name"))
  }

} 
Example 85
Source File: WorkspaceLoaderTests.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.console.workspacehandling

import better.files.Dsl.mkdir
import better.files.File
import org.scalatest.{Matchers, WordSpec}

import scala.reflect.io.Directory

class WorkspaceLoaderTests extends WordSpec with Matchers {

  private val tmpDirPrefix = "workspace-tests"

  "WorkspaceLoader" should {

    "create workspace and workspace directory if nonexistent" in {
      val dir = File.newTemporaryDirectory(tmpDirPrefix)
      new Directory(dir.toJava).deleteRecursively()
      TestLoader().load(dir.path.toString)
      try {
        dir.exists shouldBe true
      } finally {
        new Directory(dir.toJava).deleteRecursively()
      }
    }

    "handle broken project.json gracefully by skipping project" in {
      File.usingTemporaryDirectory(tmpDirPrefix) { tmpDir =>
        mkdir(tmpDir / "1")
        (tmpDir / "1" / "project.json").write("{foo")
        TestLoader().load(tmpDir.path.toString).numberOfProjects shouldBe 0
      }
    }

    "load project correctly" in {
      File.usingTemporaryDirectory(tmpDirPrefix) { tmpDir =>
        val projectName = "foo"
        WorkspaceTests.createFakeProject(tmpDir, projectName)
        val project = TestLoader().loadProject((tmpDir / projectName).path)
        project match {
          case Some(p) =>
            p.name shouldBe "foo"
            p.inputPath shouldBe "foo"
            p.cpg shouldBe None
          case None => fail
        }
      }
    }

    "initialize workspace's project list correctly" in {
      File.usingTemporaryDirectory(tmpDirPrefix) { tmpDir =>
        val projectName = "foo"
        WorkspaceTests.createFakeProject(tmpDir, projectName)
        val workspace = TestLoader().load(tmpDir.toString)
        workspace.numberOfProjects shouldBe 1
      }
    }
  }

  "ProjectFile" should {

    import org.json4s.DefaultFormats
    import org.json4s.native.Serialization.{read => jsonRead, write => jsonWrite}
    implicit val formats: DefaultFormats.type = DefaultFormats

    "be serializable to json" in {
      jsonWrite(ProjectFile("foo", "aname")) shouldBe """{"inputPath":"foo","name":"aname"}"""
    }

    "be deserializable from json" in {
      val projectFile = jsonRead[ProjectFile]("""{"inputPath":"foo","name":"aname"}""")
      projectFile.inputPath shouldBe "foo"
      projectFile.name shouldBe "aname"
    }

  }

} 
Example 86
Source File: IngestExampleSpec.scala    From Hands-On-Data-Analysis-with-Scala   with MIT License 5 votes vote down vote up
package handson.example.ingest

import org.json4s.DefaultFormats
import org.json4s.native.JsonMethods.parse
import org.scalatest.{FlatSpec, Matchers}


class IngestExampleSpec extends FlatSpec with Matchers {
  "Ingest of various formats" should "produce same results" in {
    implicit val formats = DefaultFormats
    // XML
    val xml = <person>
      <fname>Jon</fname>
      <lname>Doe</lname>
      <phone>123-456-7890</phone>
      <zip>12345</zip>
      <state>NY</state>
    </person>
    val normXml = Person(xml \ "fname" text, xml \ "lname" text, xml \ "phone" text, xml \ "zip" text, xml \ "state" text)
    // JSON
    val jsonStr =
      """
                 {
    "fname": "Jon",
    "lname": "Doe",
    "phone": "123-456-7890",
    "zip": "12345",
    "state": "NY"
    }"""
    val json = parse(jsonStr)
    val normJson = json.extract[Person]
    // CSV (for simplicity, we use split method of String to parse CSV)
    val csvStr = "Jon,Doe,123-456-7890,12345,NY"
    val csvCols = csvStr.split(",")
    val normCsv = Person(csvCols(0), csvCols(1), csvCols(2), csvCols(3), csvCols(4))
    // Let us make sure that all normal objects are same
    assert(normXml === normJson)
    assert(normXml === normCsv)
  }

  "getState" should "return MA for 02701" in {
    assert(IngestExample.getState("02701") === "MA")
  }

  "filter and filterNot API" should "produce same outcome with appropriate conditions" in {
    val originalPersons = List(
      Person("Jon", "Doe", "123-456-7890", "12345", "NY"),
      Person("James", "Smith", "555-456-7890", "00600", "PR"),
      Person("Don", "Duck", "777-456-7890", "00800", "VI"),
      Person("Doug", "Miller", "444-456-7890", "02800", "RI"),
      Person("Van", "Peter", "333-456-7890", "02700", "MA")
    )
    val exclusionStates = Set("PR", "VI") // we want to exclude these states
    val filteredPersons1 = originalPersons.filterNot(p => exclusionStates.contains(p.state))
    val filteredPersons2 = originalPersons.filter(p => !exclusionStates.contains(p.state))
    assert(filteredPersons1 === filteredPersons2)
  }

} 
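The Person model used by this spec is defined elsewhere in the project; a minimal sketch of the shape the spec assumes, with the field names inferred from the XML, JSON and CSV ingestion above:

case class Person(fname: String, lname: String, phone: String, zip: String, state: String)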
Example 87
Source File: DataFinder.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.template

import java.io.{InputStreamReader, ByteArrayInputStream}

import com.github.tototoshi.csv.CSVReader
import com.ivan.nikolov.behavioral.template.model.Person
import org.json4s.{StringInput, DefaultFormats}
import org.json4s.jackson.JsonMethods

abstract class DataFinder[T, Y] {

  def find(f: T => Option[Y]): Option[Y] =
    try {
      val data = readData()
      val parsed = parse(data)
      f(parsed)
    } finally {
      cleanup()
    }

  def readData(): Array[Byte]

  def parse(data: Array[Byte]): T

  def cleanup()
}

class JsonDataFinder extends DataFinder[List[Person], Person] {
  implicit val formats = DefaultFormats

  override def readData(): Array[Byte] = {
    val stream = this.getClass.getResourceAsStream("people.json")
    Stream.continually(stream.read).takeWhile(_ != -1).map(_.toByte).toArray
  }

  override def cleanup(): Unit = {
    System.out.println("Reading json: nothing to do.")
  }

  override def parse(data: Array[Byte]): List[Person] =
    JsonMethods.parse(StringInput(new String(data, "UTF-8"))).extract[List[Person]]
}

class CSVDataFinder extends DataFinder[List[Person], Person] {
  override def readData(): Array[Byte] = {
    val stream = this.getClass.getResourceAsStream("people.csv")
    Stream.continually(stream.read).takeWhile(_ != -1).map(_.toByte).toArray
  }

  override def cleanup(): Unit = {
    System.out.println("Reading csv: nothing to do.")
  }

  override def parse(data: Array[Byte]): List[Person] =
    CSVReader.open(new InputStreamReader(new ByteArrayInputStream(data))).all().map {
      case List(name, age, address) =>
        Person(name, age.toInt, address)
    }
}


object DataFinderExample {
  def main(args: Array[String]): Unit = {
    val jsonDataFinder: DataFinder[List[Person], Person] = new JsonDataFinder
    val csvDataFinder: DataFinder[List[Person], Person] = new CSVDataFinder

    System.out.println(s"Find a person with name Ivan in the json: ${jsonDataFinder.find(_.find(_.name == "Ivan"))}")
    System.out.println(s"Find a person with name James in the json: ${jsonDataFinder.find(_.find(_.name == "James"))}")

    System.out.println(s"Find a person with name Maria in the csv: ${csvDataFinder.find(_.find(_.name == "Maria"))}")
    System.out.println(s"Find a person with name Alice in the csv: ${csvDataFinder.find(_.find(_.name == "Alice"))}")
  }
} 
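Both the template-method and strategy examples from this project share a small Person model defined in a separate model package; a minimal sketch of its assumed shape, inferred from the CSV parsing above:

case class Person(name: String, age: Int, address: String)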
Example 88
Source File: ParsingStrategy.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.strategy

import java.io.InputStreamReader

import com.github.tototoshi.csv.CSVReader
import com.ivan.nikolov.behavioral.strategy.model.Person
import org.json4s.{StreamInput, DefaultFormats}
import org.json4s.jackson.JsonMethods

class Application[T](strategy: (String) => List[T]) {
  def write(file: String): Unit = {
    System.out.println(s"Got the following data ${strategy(file)}")
  }
}

object StrategyFactory {
  implicit val formats = DefaultFormats
  
  def apply(filename: String): (String) => List[Person] =
    filename match {
      case f if f.endsWith(".json") => parseJson
      case f if f.endsWith(".csv") => parseCsv
      case f => throw new RuntimeException(s"Unknown format: $f")
    }
  
  def parseJson(file: String): List[Person] =
    JsonMethods.parse(StreamInput(this.getClass.getResourceAsStream(file))).extract[List[Person]]
  
  def parseCsv(file: String): List[Person] =
    CSVReader.open(new InputStreamReader(this.getClass.getResourceAsStream(file))).all().map {
      case List(name, age, address) =>
        Person(name, age.toInt, address)
    }
}

object StrategyExample {
  def main(args: Array[String]): Unit = {
    val applicationCsv = new Application[Person](StrategyFactory("people.csv"))
    val applicationJson = new Application[Person](StrategyFactory("people.json"))

    System.out.println("Using the csv: ")
    applicationCsv.write("people.csv")

    System.out.println("Using the json: ")
    applicationJson.write("people.json")
  }
} 
Example 95
Source File: JsonServlet.scala    From chatoverflow   with Eclipse Public License 2.0 5 votes vote down vote up
package org.codeoverflow.chatoverflow.ui.web

import javax.servlet.http.HttpServletRequest
import org.codeoverflow.chatoverflow.ui.web.rest.DTOs.ResultMessage
import org.codeoverflow.chatoverflow.{ChatOverflow, Launcher}
import org.json4s.{DefaultFormats, Formats}
import org.scalatra.json.JacksonJsonSupport
import org.scalatra.{BadRequest, CorsSupport, ScalatraServlet, Unauthorized}

// Class header elided in this excerpt; an assumed minimal shape is restored so the method below
// has its context (JacksonJsonSupport requires the implicit jsonFormats; the chatOverflow wiring is elided).
abstract class JsonServlet extends ScalatraServlet with JacksonJsonSupport with CorsSupport {
  protected implicit lazy val jsonFormats: Formats = DefaultFormats
  protected val chatOverflow: ChatOverflow
  protected def authKeyRequired(func: => Any)(implicit request: HttpServletRequest): Any = {
    val authKeyKey = "authKey"

    if (request.header(authKeyKey).isEmpty) {
      BadRequest()
    } else if (request.header(authKeyKey).get != chatOverflow.credentialsService.generateAuthKey()) {
      Unauthorized()
    } else {
      func
    }
  }
} 
Example 96
Source File: EarnDotComFeeProvider.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.fee

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JArray, JInt, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


class EarnDotComFeeProvider(readTimeOut: Duration)(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {

  import EarnDotComFeeProvider._

  implicit val formats = DefaultFormats
  implicit val serialization = Serialization

  val uri = uri"https://bitcoinfees.earn.com/api/v1/fees/list"

  override def getFeerates: Future[FeeratesPerKB] =
    for {
      json <- sttp.readTimeout(readTimeOut).get(uri)
        .response(asJson[JValue])
        .send()
      feeRanges = parseFeeRanges(json.unsafeBody)
    } yield extractFeerates(feeRanges)

}

object EarnDotComFeeProvider {

  case class FeeRange(minFee: Long, maxFee: Long, memCount: Long, minDelay: Long, maxDelay: Long)

  def parseFeeRanges(json: JValue): Seq[FeeRange] = {
    val JArray(items) = json \ "fees"
    items.map(item => {
      val JInt(minFee) = item \ "minFee"
      val JInt(maxFee) = item \ "maxFee"
      val JInt(memCount) = item \ "memCount"
      val JInt(minDelay) = item \ "minDelay"
      val JInt(maxDelay) = item \ "maxDelay"
      // earn.com returns fees in Satoshi/byte and we want Satoshi/KiloByte
      FeeRange(minFee = 1000 * minFee.toLong, maxFee = 1000 * maxFee.toLong, memCount = memCount.toLong, minDelay = minDelay.toLong, maxDelay = maxDelay.toLong)
    })
  }

  def extractFeerate(feeRanges: Seq[FeeRange], maxBlockDelay: Int): Long = {
    // first we keep only fee ranges with a max block delay below the limit
    val belowLimit = feeRanges.filter(_.maxDelay <= maxBlockDelay)
    // out of all the remaining fee ranges, we select the one with the minimum higher bound and make sure it is > 0
    Math.max(belowLimit.minBy(_.maxFee).maxFee, 1)
  }

  def extractFeerates(feeRanges: Seq[FeeRange]): FeeratesPerKB =
    FeeratesPerKB(
      block_1 = extractFeerate(feeRanges, 1),
      blocks_2 = extractFeerate(feeRanges, 2),
      blocks_6 = extractFeerate(feeRanges, 6),
      blocks_12 = extractFeerate(feeRanges, 12),
      blocks_36 = extractFeerate(feeRanges, 36),
      blocks_72 = extractFeerate(feeRanges, 72),
      blocks_144 = extractFeerate(feeRanges, 144))

} 
Example 97
Source File: BitcoinCoreFeeProvider.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.fee

import fr.acinq.bitcoin._
import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinJsonRPCClient
import org.json4s.DefaultFormats
import org.json4s.JsonAST._

import scala.concurrent.{ExecutionContext, Future}

// Class header elided in this excerpt; assumed constructor restored from the members referenced below.
class BitcoinCoreFeeProvider(rpcClient: BitcoinJsonRPCClient, defaultFeerates: FeeratesPerKB)(implicit ec: ExecutionContext) extends FeeProvider {

  implicit val formats = DefaultFormats
  def estimateSmartFee(nBlocks: Int): Future[Long] =
    rpcClient.invoke("estimatesmartfee", nBlocks).map(BitcoinCoreFeeProvider.parseFeeEstimate)

  override def getFeerates: Future[FeeratesPerKB] = for {
    block_1 <- estimateSmartFee(1)
    blocks_2 <- estimateSmartFee(2)
    blocks_6 <- estimateSmartFee(6)
    blocks_12 <- estimateSmartFee(12)
    blocks_36 <- estimateSmartFee(36)
    blocks_72 <- estimateSmartFee(72)
    blocks_144 <- estimateSmartFee(144)
  } yield FeeratesPerKB(
    block_1 = if (block_1 > 0) block_1 else defaultFeerates.block_1,
    blocks_2 = if (blocks_2 > 0) blocks_2 else defaultFeerates.blocks_2,
    blocks_6 = if (blocks_6 > 0) blocks_6 else defaultFeerates.blocks_6,
    blocks_12 = if (blocks_12 > 0) blocks_12 else defaultFeerates.blocks_12,
    blocks_36 = if (blocks_36 > 0) blocks_36 else defaultFeerates.blocks_36,
    blocks_72 = if (blocks_72 > 0) blocks_72 else defaultFeerates.blocks_72,
    blocks_144 = if (blocks_144 > 0) blocks_144 else defaultFeerates.blocks_144)
}

object BitcoinCoreFeeProvider {
  def parseFeeEstimate(json: JValue): Long = {
    json \ "errors" match {
      case JNothing =>
        json \ "feerate" match {
          case JDecimal(feerate) =>
            // estimatesmartfee returns a fee rate in Btc/KB
            btc2satoshi(Btc(feerate)).toLong
          case JInt(feerate) if feerate.toLong < 0 =>
            // negative value means failure
            feerate.toLong
          case JInt(feerate) =>
            // should (hopefully) never happen
            btc2satoshi(Btc(feerate.toLong)).toLong
        }
      case JArray(errors) =>
        val error = errors.collect { case JString(error) => error }.mkString(", ")
        throw new RuntimeException(s"estimatesmartfee failed: $error")
      case _ =>
        throw new RuntimeException("estimatesmartfee failed")
    }
  }
} 
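parseFeeEstimate can be checked against an inline estimatesmartfee-style response; a minimal sketch with hypothetical values. Parsing with useBigDecimalForDouble = true yields the JDecimal node the success branch expects:

import org.json4s.jackson.JsonMethods

object BitcoinCoreFeeProviderSketch extends App {
  val ok = JsonMethods.parse("""{"feerate":0.00020000,"blocks":6}""", useBigDecimalForDouble = true)
  // 0.0002 BTC/KB is converted to 20000 satoshi/KB
  println(BitcoinCoreFeeProvider.parseFeeEstimate(ok))

  val failed = JsonMethods.parse("""{"errors":["Insufficient data or no feerate found"]}""")
  // the errors array is turned into a RuntimeException
  println(scala.util.Try(BitcoinCoreFeeProvider.parseFeeEstimate(failed)))
}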
Example 98
Source File: BitgoFeeProvider.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.fee

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import fr.acinq.bitcoin.{Block, ByteVector32}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class BitgoFeeProvider(chainHash: ByteVector32, readTimeOut: Duration)(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {

  import BitgoFeeProvider._

  implicit val formats = DefaultFormats
  implicit val serialization = Serialization

  val uri = chainHash match {
    case Block.LivenetGenesisBlock.hash => uri"https://www.bitgo.com/api/v2/btc/tx/fee"
    case _ => uri"https://test.bitgo.com/api/v2/tbtc/tx/fee"
  }

  override def getFeerates: Future[FeeratesPerKB] =
    for {
      res <- sttp.readTimeout(readTimeOut).get(uri)
        .response(asJson[JValue])
        .send()
      feeRanges = parseFeeRanges(res.unsafeBody)
    } yield extractFeerates(feeRanges)

}

object BitgoFeeProvider {

  case class BlockTarget(block: Int, fee: Long)

  def parseFeeRanges(json: JValue): Seq[BlockTarget] = {
    val blockTargets = json \ "feeByBlockTarget"
    blockTargets.foldField(Seq.empty[BlockTarget]) {
      // BitGo returns estimates in Satoshi/KB, which is what we want
      case (list, (strBlockTarget, JInt(feePerKB))) => list :+ BlockTarget(strBlockTarget.toInt, feePerKB.longValue)
    }
  }

  def extractFeerate(feeRanges: Seq[BlockTarget], maxBlockDelay: Int): Long = {
    // first we keep only fee ranges with a max block delay below the limit
    val belowLimit = feeRanges.filter(_.block <= maxBlockDelay)
    // out of all the remaining fee ranges, we select the one with the minimum higher bound
    belowLimit.map(_.fee).min
  }

  def extractFeerates(feeRanges: Seq[BlockTarget]): FeeratesPerKB =
    FeeratesPerKB(
      block_1 = extractFeerate(feeRanges, 1),
      blocks_2 = extractFeerate(feeRanges, 2),
      blocks_6 = extractFeerate(feeRanges, 6),
      blocks_12 = extractFeerate(feeRanges, 12),
      blocks_36 = extractFeerate(feeRanges, 36),
      blocks_72 = extractFeerate(feeRanges, 72),
      blocks_144 = extractFeerate(feeRanges, 144))

} 
Example 99
Source File: BasicBitcoinJsonRPCClient.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.bitcoind.rpc

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.eclair.KamonExt
import fr.acinq.eclair.blockchain.Monitoring.{Metrics, Tags}
import org.json4s.{CustomSerializer, DefaultFormats}
import org.json4s.JsonAST.{JString, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.{ExecutionContext, Future}

class BasicBitcoinJsonRPCClient(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit http: SttpBackend[Future, Nothing]) extends BitcoinJsonRPCClient {

  // necessary to properly serialize ByteVector32 into String readable by bitcoind
  object ByteVector32Serializer extends CustomSerializer[ByteVector32](_ => ( {
    null
  }, {
    case x: ByteVector32 => JString(x.toHex)
  }))
  implicit val formats = DefaultFormats.withBigDecimal + ByteVector32Serializer
  private val scheme = if (ssl) "https" else "http"
  private val serviceUri = uri"$scheme://$host:$port/wallet/" // wallet/ specifies to use the default bitcoind wallet, named ""
  implicit val serialization = Serialization

  override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] =
    invoke(Seq(JsonRPCRequest(method = method, params = params))).map(l => jsonResponse2Exception(l.head).result)

  def jsonResponse2Exception(jsonRPCResponse: JsonRPCResponse): JsonRPCResponse = jsonRPCResponse match {
    case JsonRPCResponse(_, Some(error), _) => throw JsonRPCError(error)
    case o => o
  }

  def invoke(requests: Seq[JsonRPCRequest])(implicit ec: ExecutionContext): Future[Seq[JsonRPCResponse]] = {
    requests.groupBy(_.method).foreach {
      case (method, calls) => Metrics.RpcBasicInvokeCount.withTag(Tags.Method, method).increment(calls.size)
    }
    KamonExt.timeFuture(Metrics.RpcBasicInvokeDuration.withoutTags()) {
      for {
        res <- sttp
          .post(serviceUri)
          .body(requests)
          .auth.basic(user, password)
          .response(asJson[Seq[JsonRPCResponse]])
          .send()
      } yield res.unsafeBody
    }
  }

} 
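A minimal sketch (not part of eclair) showing the effect of the custom serializer above: with it added to the implicit formats, a ByteVector32 is rendered as its hex string, which is the form bitcoind expects in request parameters:

import fr.acinq.bitcoin.ByteVector32
import org.json4s.{CustomSerializer, DefaultFormats}
import org.json4s.JsonAST.JString
import org.json4s.jackson.Serialization

object ByteVector32SerializerSketch extends App {
  object ByteVector32Serializer extends CustomSerializer[ByteVector32](_ => (
    PartialFunction.empty,                        // deserialization is not needed for this sketch
    { case x: ByteVector32 => JString(x.toHex) }  // serialize as hex, as in the client above
  ))
  implicit val formats = DefaultFormats.withBigDecimal + ByteVector32Serializer

  // prints {"txid":"0000000000000000000000000000000000000000000000000000000000000000"}
  println(Serialization.write(Map("txid" -> ByteVector32.Zeroes)))
}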
Example 100
Source File: BitgoFeeProviderSpec.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.fee

import akka.actor.ActorSystem
import akka.util.Timeout
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import fr.acinq.bitcoin.Block
import org.json4s.DefaultFormats
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.Await



class BitgoFeeProviderSpec extends AnyFunSuite {

  import BitgoFeeProvider._
  import org.json4s.jackson.JsonMethods.parse

  implicit val formats = DefaultFormats

  val sample_response =
    """
      {"feePerKb":136797,"cpfpFeePerKb":136797,"numBlocks":2,"confidence":80,"multiplier":1,"feeByBlockTarget":{"1":149453,"2":136797,"5":122390,"6":105566,"8":100149,"9":96254,"10":122151,"13":116855,"15":110860,"17":87402,"27":82635,"33":71098,"42":105782,"49":68182,"73":59207,"97":17336,"121":16577,"193":13545,"313":12268,"529":11122,"553":9139,"577":5395,"793":5070}}
    """

  test("parse test") {
    val json = parse(sample_response)
    val feeRanges = parseFeeRanges(json)
    assert(feeRanges.size === 23)
  }

  test("extract fee for a particular block delay") {
    val json = parse(sample_response)
    val feeRanges = parseFeeRanges(json)
    val fee = extractFeerate(feeRanges, 6)
    assert(fee === 105566)
  }

  test("extract all fees") {
    val json = parse(sample_response)
    val feeRanges = parseFeeRanges(json)
    val feerates = extractFeerates(feeRanges)
    val ref = FeeratesPerKB(
      block_1 = 149453,
      blocks_2 = 136797,
      blocks_6 = 105566,
      blocks_12 = 96254,
      blocks_36 = 71098,
      blocks_72 = 68182,
      blocks_144 = 16577)
    assert(feerates === ref)
  }

  test("make sure API hasn't changed") {
    import scala.concurrent.duration._
    implicit val system = ActorSystem("test")
    implicit val ec = system.dispatcher
    implicit val sttp = OkHttpFutureBackend()
    implicit val timeout = Timeout(30 seconds)
    val bitgo = new BitgoFeeProvider(Block.LivenetGenesisBlock.hash, 5 seconds)
    assert(Await.result(bitgo.getFeerates, timeout.duration).block_1 > 0)
  }

  test("check that read timeout is enforced") {
    import scala.concurrent.duration._
    implicit val system = ActorSystem("test")
    implicit val ec = system.dispatcher
    implicit val sttp = OkHttpFutureBackend()
    implicit val timeout = Timeout(30 second)
    val bitgo = new BitgoFeeProvider(Block.LivenetGenesisBlock.hash, 1 millisecond)
    val e = intercept[Exception] {
      Await.result(bitgo.getFeerates, timeout.duration)
    }
    assert(e.getMessage.contains("Read timed out"))
  }
} 
Example 101
Source File: ErrorEventsWriter.scala    From etl-light   with MIT License 5 votes vote down vote up
package yamrcraft.etlite.writers

import java.io.OutputStream

import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, IntWritable, SequenceFile, Text}
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats, ShortTypeHints}
import yamrcraft.etlite.utils.FileUtils


case class ErrorInfo(
  errorType: String,
  errorMsg: Option[String]
)


class ErrorEventWriter(folder: String, jobId: Long, partitionId: Int)
  extends ErrorEventsWriter {

  // incremental record id
  var recordId = 1

  val fs = FileUtils.getFS(folder)

  val seqPath = new Path(folder, s"errors_job${jobId}_part$partitionId.seq")
  if (fs.exists(seqPath)) {
    fs.delete(seqPath, false)
  }

  val metaPath = new Path(folder, s"errors_job${jobId}_part$partitionId.meta.seq")
  if (fs.exists(metaPath)) {
    fs.delete(metaPath, false)
  }

  private var seqWriter: Option[SequenceFile.Writer] = None
  private var metaWriter: Option[SequenceFile.Writer] = None

  implicit val formats =
    new Formats {
      val dateFormat = DefaultFormats.lossless.dateFormat
      override val typeHints = ShortTypeHints(List(classOf[ErrorInfo]))
      override val typeHintFieldName = "type"
    }

  override def write(errorEvent: (Array[Byte], ErrorInfo)) = {

    if (seqWriter.isEmpty) {
      seqWriter = createSequenceFile(seqPath, classOf[IntWritable], classOf[BytesWritable])
      metaWriter = createSequenceFile(metaPath, classOf[IntWritable], classOf[Text])
    }

    val id = new IntWritable(recordId)
    seqWriter.get.append(id, new BytesWritable(errorEvent._1))
    metaWriter.get.append(id, new Text(Serialization.write(errorEvent._2)))

    recordId += 1
  }

  override def commit() = {
    seqWriter.foreach(p => p.close())
    metaWriter.foreach(p => p.close())
  }

  private def createSequenceFile(path: Path, keyClass: Class[_], valueClass: Class[_]) = {
    val optPath = SequenceFile.Writer.file(path)
    val optKey = SequenceFile.Writer.keyClass(keyClass)
    val optVal = SequenceFile.Writer.valueClass(valueClass)
    Some(SequenceFile.createWriter(fs.getConf, optPath, optKey, optVal))
  }
} 
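A minimal sketch (not part of the project) of what the metadata record written above looks like: with ShortTypeHints and typeHintFieldName = "type", Serialization.write adds a type discriminator next to the ErrorInfo fields:

import org.json4s.{DefaultFormats, Formats, ShortTypeHints}
import org.json4s.jackson.Serialization

object ErrorInfoJsonSketch extends App {
  implicit val formats: Formats = new Formats {
    val dateFormat = DefaultFormats.lossless.dateFormat
    override val typeHints = ShortTypeHints(List(classOf[ErrorInfo]))
    override val typeHintFieldName = "type"
  }

  // expected shape (assumption): {"type":"ErrorInfo","errorType":"parse","errorMsg":"bad json"}
  println(Serialization.write(ErrorInfo("parse", Some("bad json"))))
}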
Example 102
Source File: ChatSupervisor.scala    From heimdallr   with Apache License 2.0 5 votes vote down vote up
package chat

import akka.actor._
import akka.actor.SupervisorStrategy._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import org.json4s._
import org.json4s.{DefaultFormats, JValue}
import java.util.concurrent.TimeUnit
import EventConstants._
import akka.stream.ActorMaterializer


  def createNewChatRoom(number: Int): ActorRef = {
    var chatroom: ActorRef = null
    try {
      //creates new ChatRoomActor and returns as an ActorRef
      chatroom = context.actorOf(Props(new ChatRoomActor(number, envType)), s"${number}")
      ChatRooms.chatRooms += number -> chatroom
    }
    catch {
      case e: Exception =>
        log.info(s"FIXME: Create new chat room(${number}) => " + e)
        self ! CreateChatRoom(number)
    }

    chatroom
  }

  def removeChatRoom(chatRoomID: Int): Unit = {
    this.synchronized {
      ChatRooms.chatRooms.remove(chatRoomID)
    }
  }

  override def receive: Receive = {
    case CreateChatRoom(chatRoomID) =>
      getChatRoomActorRef(chatRoomID)

    case RemoveChatRoom(chatRoomID) =>
      removeChatRoom(chatRoomID)

    case RegChatUser(chatRoomID, userActor) =>
      userActor ! JoinRoom(getChatRoomActorRef(chatRoomID))

    case RegProps(props, name) =>
      context.actorOf(props, name)

    case HeimdallrError =>
      throw new ArithmeticException()

    case HeimdallrChatStatus =>
      log.info( "Heimdallr ChatSupervisor Running ..." )

    // *** supervisor ! "akka://heimdallr/user/{Valid ActorName}"
    case path: String =>
      log.debug(s"checking path => $path")
      context.actorSelection(path) ! Identify(path)

    case ActorIdentity(path, Some(ref)) =>
      log.debug(s"found actor $ref on $path")

    // *** supervisor ! "/user/{Invalid ActorName}"
    case ActorIdentity(path, None) =>
      log.debug(s"could not find an actor on $path")

    case Terminated(user) =>
      log.info("Receive Terminated Event of ChatRoomActor")

    case x =>
      log.warning("ChatSupervisor Unknown message : " + x)
  }
} 
Example 103
Source File: ArrayParam.scala    From mmlspark   with MIT License 5 votes vote down vote up
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.

package org.apache.spark.ml.param

import org.apache.spark.annotation.DeveloperApi
import org.json4s.{DefaultFormats, _}
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.JavaConverters._


    def w(value: java.util.List[_]): ParamPair[Array[_]] = w(value.asScala.toArray)

    override def jsonEncode(value: Array[_]): String = {
      import org.json4s.JsonDSL._
      value match {
        case intArr: Array[Int] => compact(render(intArr.toSeq))
        case dbArr: Array[Double] => compact(render(dbArr.toSeq))
        case strArr: Array[String] => compact(render(strArr.toSeq))
        case blArr: Array[Boolean] => compact(render(blArr.toSeq))
        case intArr: Array[Integer] => compact(render(intArr.map(_.toLong).toSeq))
        case _ =>
          throw new IllegalArgumentException("Internal type not json serializable")
      }
    }

    override def jsonDecode(json: String): Array[_] = {
      implicit val formats: DefaultFormats.type = DefaultFormats
      parse(json).extract[Seq[_]].toArray
    }
  } 
Example 104
Source File: DeployConfig.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.services

import java.io.{File, FileInputStream}

import is.hail.utils._
import org.json4s.{DefaultFormats, Formats, JValue}
import org.json4s.jackson.JsonMethods

object DeployConfig {
  lazy val get: DeployConfig = fromConfigFile()

  def fromConfigFile(file0: String = null): DeployConfig = {
    var file = file0

    if (file == null)
      file = System.getenv("HAIL_DEPLOY_CONFIG_FILE")

    if (file == null) {
      val fromHome = s"${ System.getenv("HOME") }/.hail/deploy-config.json"
      if (new File(fromHome).exists())
        file = fromHome
    }

    if (file == null) {
      val f = "/deploy-config/deploy-config.json"
      if (new File(f).exists())
        file = f
    }

    if (file != null) {
      using(new FileInputStream(file)) { in =>
        fromConfig(JsonMethods.parse(in))
      }
    } else
      new DeployConfig(
        "external",
        "default",
        Map())
  }

  def fromConfig(config: JValue): DeployConfig = {
    implicit val formats: Formats = DefaultFormats
    new DeployConfig(
      (config \ "location").extract[String],
      (config \ "default_namespace").extract[String],
      (config \ "service_namespace").extract[Map[String, String]])
  }
}

class DeployConfig(
  val location: String,
  val defaultNamespace: String,
  val serviceNamespace: Map[String, String]) {

  def scheme(baseScheme: String = "http"): String = {
    if (location == "external" || location == "k8s")
      baseScheme + "s"
    else
      baseScheme
  }

  def getServiceNamespace(service: String): String = {
    serviceNamespace.getOrElse(service, defaultNamespace)
  }

  def domain(service: String): String = {
    val ns = getServiceNamespace(service)
    location match {
      case "k8s" =>
        s"$service.$ns"
      case "gce" =>
        if (ns == "default")
          s"$service.hail"
        else
          "internal.hail"
      case "external" =>
        if (ns == "default")
          s"$service.hail.is"
        else
          "internal.hail.is"
    }
  }

  def basePath(service: String): String = {
    val ns = getServiceNamespace(service)
    if (ns == "default")
      ""
    else
      s"/$ns/$service"
  }

  def baseUrl(service: String, baseScheme: String = "http"): String = {
    s"${ scheme(baseScheme) }://${ domain(service) }${ basePath(service) }"
  }
} 
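A small usage sketch of DeployConfig (config values made up): parse a config and resolve a service URL.

import org.json4s.jackson.JsonMethods

val conf = DeployConfig.fromConfig(JsonMethods.parse(
  """{"location": "k8s", "default_namespace": "default", "service_namespace": {"batch": "test"}}"""))

conf.scheme()          // "https" (k8s and external locations use TLS)
conf.domain("batch")   // "batch.test"
conf.baseUrl("batch")  // "https://batch.test/test/batch"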
Example 105
Source File: package.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.services

import is.hail.utils._
import org.json4s.{DefaultFormats, Formats}
import java.io.{File, FileInputStream}
import java.security.KeyStore

import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
import org.apache.log4j.{LogManager, Logger}
import org.json4s.jackson.JsonMethods

class NoSSLConfigFound(
  message: String,
  cause: Throwable
) extends Exception(message, cause) {
  def this() = this(null, null)

  def this(message: String) = this(message, null)
}

case class SSLConfig(
  outgoing_trust: String,
  outgoing_trust_store: String,
  incoming_trust: String,
  incoming_trust_store: String,
  key: String,
  cert: String,
  key_store: String)

package object tls {
  lazy val log: Logger = LogManager.getLogger("is.hail.tls")

  private[this] lazy val _getSSLConfig: SSLConfig = {
    var configFile = System.getenv("HAIL_SSL_CONFIG_FILE")
    if (configFile == null)
      configFile = "/ssl-config/ssl-config.json"
    if (!new File(configFile).isFile)
      throw new NoSSLConfigFound(s"no ssl config file found at $configFile")

    log.info(s"ssl config file found at $configFile")

    using(new FileInputStream(configFile)) { is =>
      implicit val formats: Formats = DefaultFormats
      JsonMethods.parse(is).extract[SSLConfig]
    }
  }

  lazy val getSSLContext: SSLContext = {
    val sslConfig = _getSSLConfig

    val pw = "dummypw".toCharArray

    val ks = KeyStore.getInstance("PKCS12")
    using(new FileInputStream(sslConfig.key_store)) { is =>
      ks.load(is, pw)
    }
    val kmf = KeyManagerFactory.getInstance("SunX509")
    kmf.init(ks, pw)

    val ts = KeyStore.getInstance("JKS")
    using(new FileInputStream(sslConfig.outgoing_trust_store)) { is =>
      ts.load(is, pw)
    }
    val tmf = TrustManagerFactory.getInstance("SunX509")
    tmf.init(ts)

    val ctx = SSLContext.getInstance("TLS")
    ctx.init(kmf.getKeyManagers, tmf.getTrustManagers, null)

    ctx
  }
} 
Example 106
Source File: Tokens.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.services

import is.hail.utils._
import java.io.{File, FileInputStream}

import org.apache.http.client.methods.HttpUriRequest
import org.apache.log4j.{LogManager, Logger}
import org.json4s.{DefaultFormats, Formats}
import org.json4s.jackson.JsonMethods

object Tokens {
  lazy val log: Logger = LogManager.getLogger("Tokens")

  def get: Tokens = {
    val file = getTokensFile()
    if (new File(file).isFile) {
      using(new FileInputStream(file)) { is =>
        implicit val formats: Formats = DefaultFormats
        new Tokens(JsonMethods.parse(is).extract[Map[String, String]])
      }
    } else {
      log.info(s"tokens file not found: $file")
      new Tokens(Map())
    }
  }

  def getTokensFile(): String = {
    if (DeployConfig.get.location == "external")
      s"${ System.getenv("HOME") }/.hail/tokens.json"
    else
      "/user-tokens/tokens.json"
  }
}

class Tokens(
  tokens: Map[String, String]
) {
  def namespaceToken(ns: String): String = tokens(ns)

  def addNamespaceAuthHeaders(ns: String, req: HttpUriRequest): Unit = {
    val token = namespaceToken(ns)
    req.addHeader("Authorization", s"Bearer $token")
    val location = DeployConfig.get.location
    if (location == "external" && ns != "default")
      req.addHeader("X-Hail-Internal-Authorization", s"Bearer ${ namespaceToken("default") }")
  }

  def addServiceAuthHeaders(service: String, req: HttpUriRequest): Unit = {
    addNamespaceAuthHeaders(DeployConfig.get.getServiceNamespace(service), req)
  }
} 
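A quick sketch of how these headers get attached to an outgoing request (hypothetical URL and namespace):

import org.apache.http.client.methods.HttpGet

val req = new HttpGet("https://batch.test/test/batch/api/v1alpha/batches")
Tokens.get.addServiceAuthHeaders("batch", req)
// adds "Authorization: Bearer <token for the resolved namespace>", plus
// "X-Hail-Internal-Authorization" when running externally against a non-default namespace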
Example 107
Source File: MatrixWriter.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.expr.ir

import is.hail.types.virtual.Type
import is.hail.io._
import is.hail.io.gen.{ExportBGEN, ExportGen}
import is.hail.io.plink.ExportPlink
import is.hail.io.vcf.ExportVCF
import is.hail.utils.ExportType
import org.json4s.{DefaultFormats, Formats, ShortTypeHints}

object MatrixWriter {
  implicit val formats: Formats = new DefaultFormats() {
    override val typeHints = ShortTypeHints(
      List(classOf[MatrixNativeWriter], classOf[MatrixVCFWriter], classOf[MatrixGENWriter],
        classOf[MatrixBGENWriter], classOf[MatrixPLINKWriter], classOf[WrappedMatrixWriter]))
    override val typeHintFieldName = "name"
  }
}

case class WrappedMatrixWriter(writer: MatrixWriter,
  colsFieldName: String,
  entriesFieldName: String,
  colKey: IndexedSeq[String]) extends TableWriter {
  def path: String = writer.path
  def apply(ctx: ExecuteContext, tv: TableValue): Unit = writer(ctx, tv.toMatrixValue(colKey, colsFieldName, entriesFieldName))
}

abstract class MatrixWriter {
  def path: String
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit
}

case class MatrixNativeWriter(
  path: String,
  overwrite: Boolean = false,
  stageLocally: Boolean = false,
  codecSpecJSONStr: String = null,
  partitions: String = null,
  partitionsTypeStr: String = null
) extends MatrixWriter {
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit = mv.write(ctx, path, overwrite, stageLocally, codecSpecJSONStr, partitions, partitionsTypeStr)
}

case class MatrixVCFWriter(
  path: String,
  append: Option[String] = None,
  exportType: String = ExportType.CONCATENATED,
  metadata: Option[VCFMetadata] = None
) extends MatrixWriter {
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit = ExportVCF(ctx, mv, path, append, exportType, metadata)
}

case class MatrixGENWriter(
  path: String,
  precision: Int = 4
) extends MatrixWriter {
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit = ExportGen(ctx, mv, path, precision)
}

case class MatrixBGENWriter(
  path: String,
  exportType: String
) extends MatrixWriter {
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit = ExportBGEN(ctx, mv, path, exportType)
}

case class MatrixPLINKWriter(
  path: String
) extends MatrixWriter {
  def apply(ctx: ExecuteContext, mv: MatrixValue): Unit = ExportPlink(ctx, mv, path)
}

object MatrixNativeMultiWriter {
  implicit val formats: Formats = new DefaultFormats() {
    override val typeHints = ShortTypeHints(List(classOf[MatrixNativeMultiWriter]))
    override val typeHintFieldName = "name"
  }
}

case class MatrixNativeMultiWriter(
  prefix: String,
  overwrite: Boolean = false,
  stageLocally: Boolean = false
) {
  def apply(ctx: ExecuteContext, mvs: IndexedSeq[MatrixValue]): Unit = MatrixValue.writeMultiple(ctx, mvs, prefix, overwrite, stageLocally)
} 
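As a quick illustration of the ShortTypeHints formats above, serializing a writer embeds the short class name under the "name" field, which is what lets the JSON round-trip back to the correct case class (output shown is approximate):

import org.json4s.jackson.Serialization

implicit val formats = MatrixWriter.formats
val json = Serialization.write(MatrixGENWriter("/tmp/example.gen"))
// json looks like: {"name":"MatrixGENWriter","path":"/tmp/example.gen","precision":4}
val writer = Serialization.read[MatrixWriter](json)   // a MatrixGENWriter again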
Example 108
Source File: AbstractTableSpec.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.expr.ir

import java.io.OutputStreamWriter

import is.hail.utils._
import is.hail.types._
import is.hail.io.fs.FS
import is.hail.rvd._
import org.json4s.jackson.JsonMethods
import org.json4s.{DefaultFormats, Extraction, Formats, JValue, ShortTypeHints}

import scala.language.implicitConversions

object SortOrder {
  def deserialize(b: Byte): SortOrder =
    if (b == 0.toByte) Ascending
    else if (b == 1.toByte) Descending
    else throw new RuntimeException(s"invalid sort order: $b")
}

sealed abstract class SortOrder {
  def serialize: Byte
}

case object Ascending extends SortOrder {
  def serialize: Byte = 0.toByte
}

case object Descending extends SortOrder {
  def serialize: Byte = 1.toByte
}

case class SortField(field: String, sortOrder: SortOrder)

abstract class AbstractTableSpec extends RelationalSpec {
  def references_rel_path: String

  def table_type: TableType

  def rowsComponent: RVDComponentSpec = getComponent[RVDComponentSpec]("rows")

  def rowsSpec: AbstractRVDSpec

  def globalsSpec: AbstractRVDSpec

  def indexed: Boolean = rowsSpec.indexed
}

object TableSpec {
  def apply(fs: FS, path: String, params: TableSpecParameters): TableSpec = {
    val globalsComponent = params.components("globals").asInstanceOf[RVDComponentSpec]
    val globalsSpec = globalsComponent.rvdSpec(fs, path)

    val rowsComponent = params.components("rows").asInstanceOf[RVDComponentSpec]
    val rowsSpec = rowsComponent.rvdSpec(fs, path)

    new TableSpec(params, globalsSpec, rowsSpec)
  }

  def fromJValue(fs: FS, path: String, jv: JValue): TableSpec = {
    implicit val formats: Formats = RelationalSpec.formats
    val params = jv.extract[TableSpecParameters]
    TableSpec(fs, path, params)
  }
}

case class TableSpecParameters(
  file_version: Int,
  hail_version: String,
  references_rel_path: String,
  table_type: TableType,
  components: Map[String, ComponentSpec]) {

  def write(fs: FS, path: String) {
    using(new OutputStreamWriter(fs.create(path + "/metadata.json.gz"))) { out =>
      out.write(JsonMethods.compact(decomposeWithName(this, "TableSpec")(RelationalSpec.formats)))
    }
  }
}

class TableSpec(
  val params: TableSpecParameters,
  val globalsSpec: AbstractRVDSpec,
  val rowsSpec: AbstractRVDSpec) extends AbstractTableSpec {
  def file_version: Int = params.file_version

  def hail_version: String = params.hail_version

  def components: Map[String, ComponentSpec] = params.components

  def references_rel_path: String = params.references_rel_path

  def table_type: TableType = params.table_type

  def toJValue: JValue = {
    decomposeWithName(params, "TableSpec")(RelationalSpec.formats)
  }
} 
Example 109
Source File: NativeReaderOptions.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.expr.ir

import is.hail.types.virtual._
import is.hail.expr.JSONAnnotationImpex
import is.hail.utils._
import org.json4s.{CustomSerializer, DefaultFormats, Formats, JObject, JValue}
import org.json4s.JsonDSL._

class NativeReaderOptionsSerializer() extends CustomSerializer[NativeReaderOptions](
  format =>
    ({ case jObj: JObject =>
      implicit val fmt = format
      val filterIntervals = (jObj \ "filterIntervals").extract[Boolean]
      val intervalPointType = IRParser.parseType((jObj \ "intervalPointType").extract[String])
      val intervals = {
        val jv = jObj \ "intervals"
        val ty = TArray(TInterval(intervalPointType))
        JSONAnnotationImpex.importAnnotation(jv, ty).asInstanceOf[IndexedSeq[Interval]]
      }
      NativeReaderOptions(intervals, intervalPointType, filterIntervals)
    }, { case opts: NativeReaderOptions =>
      implicit val fmt = format
      val ty = TArray(TInterval(opts.intervalPointType))
      (("name" -> opts.getClass.getSimpleName) ~
        ("intervals" -> JSONAnnotationImpex.exportAnnotation(opts.intervals, ty)) ~
        ("intervalPointType" -> opts.intervalPointType.parsableString()) ~
        ("filterIntervals" -> opts.filterIntervals))
    })
)

object NativeReaderOptions {
  def fromJValue(jv: JValue): NativeReaderOptions = {
    implicit val formats: Formats = DefaultFormats

    val filterIntervals = (jv \ "filterIntervals").extract[Boolean]
    val intervalPointType = IRParser.parseType((jv \ "intervalPointType").extract[String])
    val intervals = {
      val jvIntervals = jv \ "intervals"
      val ty = TArray(TInterval(intervalPointType))
      JSONAnnotationImpex.importAnnotation(jvIntervals, ty).asInstanceOf[IndexedSeq[Interval]]
    }
    NativeReaderOptions(intervals, intervalPointType, filterIntervals)
  }
}

case class NativeReaderOptions(
  intervals: IndexedSeq[Interval],
  intervalPointType: Type,
  filterIntervals: Boolean = false) {
  def toJson: JValue = {
    val ty = TArray(TInterval(intervalPointType))
    JObject(
      "name" -> "NativeReaderOptions",
      "intervals" -> JSONAnnotationImpex.exportAnnotation(intervals, ty),
      "intervalPointType" -> intervalPointType.parsableString(),
      "filterIntervals" -> filterIntervals)
  }
} 
Example 110
Source File: BlockMatrixWriter.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.expr.ir

import is.hail.HailContext
import is.hail.io.fs.FS
import is.hail.linalg.BlockMatrix
import is.hail.utils.richUtils.RichDenseMatrixDouble
import org.json4s.{DefaultFormats, Formats, ShortTypeHints}

object BlockMatrixWriter {
  implicit val formats: Formats = new DefaultFormats() {
    override val typeHints = ShortTypeHints(
      List(classOf[BlockMatrixNativeWriter], classOf[BlockMatrixBinaryWriter], classOf[BlockMatrixRectanglesWriter],
        classOf[BlockMatrixBinaryMultiWriter], classOf[BlockMatrixTextMultiWriter],
        classOf[BlockMatrixPersistWriter]))
    override val typeHintFieldName: String = "name"
  }
}


abstract class BlockMatrixWriter {
  def pathOpt: Option[String]
  def apply(ctx: ExecuteContext, bm: BlockMatrix): Unit
}

case class BlockMatrixNativeWriter(
  path: String,
  overwrite: Boolean,
  forceRowMajor: Boolean,
  stageLocally: Boolean) extends BlockMatrixWriter {
  def pathOpt: Option[String] = Some(path)

  def apply(ctx: ExecuteContext, bm: BlockMatrix): Unit = bm.write(ctx, path, overwrite, forceRowMajor, stageLocally)
}

case class BlockMatrixBinaryWriter(path: String) extends BlockMatrixWriter {
  def pathOpt: Option[String] = Some(path)
  def apply(ctx: ExecuteContext, bm: BlockMatrix): Unit = {
    RichDenseMatrixDouble.exportToDoubles(ctx.fs, path, bm.toBreezeMatrix(), forceRowMajor = true)
  }
}

case class BlockMatrixPersistWriter(id: String, storageLevel: String) extends BlockMatrixWriter {
  def pathOpt: Option[String] = None
  def apply(ctx: ExecuteContext, bm: BlockMatrix): Unit =
    HailContext.sparkBackend("BlockMatrixPersistWriter").bmCache.persistBlockMatrix(id, bm, storageLevel)
}

case class BlockMatrixRectanglesWriter(
  path: String,
  rectangles: Array[Array[Long]],
  delimiter: String,
  binary: Boolean) extends BlockMatrixWriter {

  def pathOpt: Option[String] = Some(path)

  def apply(ctx: ExecuteContext, bm: BlockMatrix): Unit = {
    bm.exportRectangles(ctx, path, rectangles, delimiter, binary)
  }
}

abstract class BlockMatrixMultiWriter {
  def apply(fs: FS, bms: IndexedSeq[BlockMatrix]): Unit
}

case class BlockMatrixBinaryMultiWriter(
  prefix: String,
  overwrite: Boolean) extends BlockMatrixMultiWriter {

  def apply(fs: FS, bms: IndexedSeq[BlockMatrix]): Unit =
    BlockMatrix.binaryWriteBlockMatrices(fs, bms, prefix, overwrite)
}

case class BlockMatrixTextMultiWriter(
  prefix: String,
  overwrite: Boolean,
  delimiter: String,
  header: Option[String],
  addIndex: Boolean,
  compression: Option[String],
  customFilenames: Option[Array[String]]) extends BlockMatrixMultiWriter {

  def apply(fs: FS, bms: IndexedSeq[BlockMatrix]): Unit =
    BlockMatrix.exportBlockMatrices(fs, bms, prefix, overwrite, delimiter, header, addIndex, compression, customFilenames)
} 
Example 111
Source File: BatchClientSuite.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.services.batch_client

import is.hail.utils._

import org.json4s.JsonAST.{JArray, JBool, JInt, JObject, JString}
import org.json4s.{DefaultFormats, Formats}
import org.scalatest.testng.TestNGSuite
import org.testng.annotations.Test

class BatchClientSuite extends TestNGSuite {
  @Test def testBasic(): Unit = {
    val client = new BatchClient()
    val token = tokenUrlSafe(32)
    val batch = client.run(
      JObject(
        "billing_project" -> JString("test"),
        "n_jobs" -> JInt(1),
        "token" -> JString(token)),
      FastIndexedSeq(
        JObject(
          "always_run" -> JBool(false),
          "image" -> JString("ubuntu:18.04"),
          "mount_docker_socket" -> JBool(false),
          "command" -> JArray(List(
            JString("/bin/bash"),
            JString("-c"),
            JString("echo 'Hello, world!'"))),
          "job_id" -> JInt(0),
          "parent_ids" -> JArray(List()))))
    implicit val formats: Formats = DefaultFormats
    assert((batch \ "state").extract[String] == "success")
  }
} 
Example 112
Source File: RWrappers.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReader


private[r] object RWrappers extends MLReader[Object] {

  override def load(path: String): Object = {
    implicit val format = DefaultFormats
    val rMetadataPath = new Path(path, "rMetadata").toString
    val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
    val rMetadata = parse(rMetadataStr)
    val className = (rMetadata \ "class").extract[String]
    className match {
      case "org.apache.spark.ml.r.NaiveBayesWrapper" => NaiveBayesWrapper.load(path)
      case "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper" =>
        AFTSurvivalRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper" =>
        GeneralizedLinearRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.KMeansWrapper" =>
        KMeansWrapper.load(path)
      case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" =>
        MultilayerPerceptronClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.LDAWrapper" =>
        LDAWrapper.load(path)
      case "org.apache.spark.ml.r.IsotonicRegressionWrapper" =>
        IsotonicRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GaussianMixtureWrapper" =>
        GaussianMixtureWrapper.load(path)
      case "org.apache.spark.ml.r.ALSWrapper" =>
        ALSWrapper.load(path)
      case "org.apache.spark.ml.r.LogisticRegressionWrapper" =>
        LogisticRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestRegressorWrapper" =>
        RandomForestRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestClassifierWrapper" =>
        RandomForestClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.GBTRegressorWrapper" =>
        GBTRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.GBTClassifierWrapper" =>
        GBTClassifierWrapper.load(path)
      case _ =>
        throw new SparkException(s"SparkR read.ml does not support load $className")
    }
  }
} 
Example 113
Source File: JsonVectorConverter.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

private[ml] object JsonVectorConverter {

  
  def toJson(v: Vector): String = {
    v match {
      case SparseVector(size, indices, values) =>
        val jValue = ("type" -> 0) ~
          ("size" -> size) ~
          ("indices" -> indices.toSeq) ~
          ("values" -> values.toSeq)
        compact(render(jValue))
      case DenseVector(values) =>
        val jValue = ("type" -> 1) ~ ("values" -> values.toSeq)
        compact(render(jValue))
    }
  }
} 
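For example, the converter above produces compact type-tagged payloads such as the following (JsonVectorConverter is private[ml], so this only compiles from within org.apache.spark.ml, but the output shape is the point):

import org.apache.spark.ml.linalg.Vectors

JsonVectorConverter.toJson(Vectors.dense(1.0, 2.0))
// {"type":1,"values":[1.0,2.0]}
JsonVectorConverter.toJson(Vectors.sparse(3, Array(1), Array(5.0)))
// {"type":0,"size":3,"indices":[1],"values":[5.0]}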
Example 114
Source File: VectorAssembler.scala    From aardpfark   with Apache License 2.0 5 votes vote down vote up
package com.ibm.aardpfark.spark.ml.feature

import com.ibm.aardpfark.pfa.document.{PFABuilder, PFADocument}
import com.ibm.aardpfark.pfa.expression.PFAExpression
import com.ibm.aardpfark.spark.ml.PFATransformer
import org.apache.avro.{Schema, SchemaBuilder}
import org.apache.spark.ml.feature.VectorAssembler
import org.json4s.DefaultFormats

class PFAVectorAssembler(override val sparkTransformer: VectorAssembler) extends PFATransformer {

  import com.ibm.aardpfark.pfa.dsl._
  implicit val formats = DefaultFormats

  private val inputCols = sparkTransformer.getInputCols
  private val outputCol = sparkTransformer.getOutputCol

  type DorSeqD = Either[Double, Seq[Double]]

  override protected def inputSchema: Schema = {
    val builder = SchemaBuilder.record(withUid(inputBaseName)).fields()
    for (inputCol <- inputCols) {
      builder.name(inputCol).`type`()
        .unionOf()
        .doubleType().and()
        .array().items().doubleType()
        .endUnion().noDefault()
    }
    builder.endRecord()
  }

  override protected def outputSchema: Schema = {
    SchemaBuilder.record(withUid(outputBaseName)).fields()
      .name(outputCol).`type`().array().items().doubleType().noDefault()
      .endRecord()
  }

  private val asDouble = As[Double]("x", x => NewArray[Double](x))
  private val asArray = As[Array[Double]]("x", x => x)

  private val castFn = NamedFunctionDef("castToArray",
    FunctionDef[DorSeqD, Seq[Double]]("x") { x =>
      Cast(x, asDouble, asArray)
    }
  )

  override protected def action: PFAExpression = {
    val cols = Let("cols", NewArray[DorSeqD](inputCols.map(c => StringExpr(s"input.$c"))))
    Action(
      cols,
      NewRecord(outputSchema, Map(outputCol -> a.flatten(a.map(cols.ref, castFn.ref))))
    )
  }

  override def pfa: PFADocument = {
    PFABuilder()
      .withName(sparkTransformer.uid)
      .withMetadata(getMetadata)
      .withInput(inputSchema)
      .withOutput(outputSchema)
      .withAction(action)
      .withFunction(castFn)
      .pfa
  }
} 
Example 115
Source File: CustomDefaults.scala    From avro4s   with Apache License 2.0 5 votes vote down vote up
package com.sksamuel.avro4s

import magnolia.{SealedTrait, Subtype}
import org.json4s.native.JsonMethods.parse
import org.json4s.native.Serialization.write
import org.apache.avro.Schema
import org.apache.avro.Schema.Type
import org.json4s.DefaultFormats

import scala.collection.JavaConverters._

sealed trait CustomDefault
case class CustomUnionDefault(className: String, values: java.util.Map[String, Any]) extends CustomDefault
case class CustomUnionWithEnumDefault(parentName: String, default: String, value: String) extends CustomDefault
case class CustomEnumDefault(value: String) extends CustomDefault

object CustomDefaults {

  implicit val formats = DefaultFormats

  def customScalaEnumDefault(value: Any) = CustomEnumDefault(value.toString)

  def customDefault(p: Product, schema: Schema): CustomDefault =
    if(isEnum(p, schema.getType))
      CustomEnumDefault(trimmedClassName(p))
    else {
      if(isUnionOfEnum(schema)) {
        val enumType = schema.getTypes.asScala.filter(_.getType == Schema.Type.ENUM).head
        CustomUnionWithEnumDefault(enumType.getName, trimmedClassName(p), p.toString)
      } else
        CustomUnionDefault(trimmedClassName(p), parse(write(p)).extract[Map[String, Any]].map {
          case (name, b: BigInt) if b.isValidInt => name -> b.intValue
          case (name, b: BigInt) if b.isValidLong => name -> b.longValue
          case (name, z) if schema.getType == Type.UNION => name ->
            schema.getTypes.asScala.find(_.getName == trimmedClassName(p)).map(_.getField(name).schema())
              .map(DefaultResolver(z, _)).getOrElse(z)
          case (name, z) => name -> DefaultResolver(z, schema.getField(name).schema())

        }.asJava)
    }

  def isUnionOfEnum(schema: Schema) = schema.getType == Schema.Type.UNION && schema.getTypes.asScala.map(_.getType).contains(Schema.Type.ENUM)

  def sealedTraitEnumDefaultValue[T](ctx: SealedTrait[SchemaFor, T]) = {
    val defaultExtractor = new AnnotationExtractors(ctx.annotations)
    defaultExtractor.enumDefault.flatMap { default =>
      ctx.subtypes.flatMap { st: Subtype[SchemaFor, T] =>
        if(st.typeName.short == default.toString)
          Option(st.typeName.short)
        else
          None
      }.headOption
    }
  }

  def isScalaEnumeration(value: Any) = value.getClass.getCanonicalName == "scala.Enumeration.Val"

  private def isEnum(product: Product, schemaType: Schema.Type) =
    product.productArity == 0 && schemaType == Schema.Type.ENUM

  private def trimmedClassName(p: Product) = trimDollar(p.getClass.getSimpleName)

  private def trimDollar(s: String) = if(s.endsWith("$")) s.dropRight(1) else s
} 
Example 116
Source File: RWrappers.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReader


private[r] object RWrappers extends MLReader[Object] {

  override def load(path: String): Object = {
    implicit val format = DefaultFormats
    val rMetadataPath = new Path(path, "rMetadata").toString
    val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
    val rMetadata = parse(rMetadataStr)
    val className = (rMetadata \ "class").extract[String]
    className match {
      case "org.apache.spark.ml.r.NaiveBayesWrapper" => NaiveBayesWrapper.load(path)
      case "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper" =>
        AFTSurvivalRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper" =>
        GeneralizedLinearRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.KMeansWrapper" =>
        KMeansWrapper.load(path)
      case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" =>
        MultilayerPerceptronClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.LDAWrapper" =>
        LDAWrapper.load(path)
      case "org.apache.spark.ml.r.IsotonicRegressionWrapper" =>
        IsotonicRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.GaussianMixtureWrapper" =>
        GaussianMixtureWrapper.load(path)
      case "org.apache.spark.ml.r.ALSWrapper" =>
        ALSWrapper.load(path)
      case "org.apache.spark.ml.r.LogisticRegressionWrapper" =>
        LogisticRegressionWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestRegressorWrapper" =>
        RandomForestRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.RandomForestClassifierWrapper" =>
        RandomForestClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.DecisionTreeRegressorWrapper" =>
        DecisionTreeRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.DecisionTreeClassifierWrapper" =>
        DecisionTreeClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.GBTRegressorWrapper" =>
        GBTRegressorWrapper.load(path)
      case "org.apache.spark.ml.r.GBTClassifierWrapper" =>
        GBTClassifierWrapper.load(path)
      case "org.apache.spark.ml.r.BisectingKMeansWrapper" =>
        BisectingKMeansWrapper.load(path)
      case "org.apache.spark.ml.r.LinearSVCWrapper" =>
        LinearSVCWrapper.load(path)
      case "org.apache.spark.ml.r.FPGrowthWrapper" =>
        FPGrowthWrapper.load(path)
      case _ =>
        throw new SparkException(s"SparkR read.ml does not support load $className")
    }
  }
} 
Example 117
Source File: JsonVectorConverter.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

private[ml] object JsonVectorConverter {

  
  def toJson(v: Vector): String = {
    v match {
      case SparseVector(size, indices, values) =>
        val jValue = ("type" -> 0) ~
          ("size" -> size) ~
          ("indices" -> indices.toSeq) ~
          ("values" -> values.toSeq)
        compact(render(jValue))
      case DenseVector(values) =>
        val jValue = ("type" -> 1) ~ ("values" -> values.toSeq)
        compact(render(jValue))
    }
  }
} 
Example 118
Source File: JsonMatrixConverter.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

private[ml] object JsonMatrixConverter {

  
  // Class tag embedded in the JSON below; the value matches the Spark source.
  val className = "matrix"

  def toJson(m: Matrix): String = {
    m match {
      case SparseMatrix(numRows, numCols, colPtrs, rowIndices, values, isTransposed) =>
        val jValue = ("class" -> className) ~
          ("type" -> 0) ~
          ("numRows" -> numRows) ~
          ("numCols" -> numCols) ~
          ("colPtrs" -> colPtrs.toSeq) ~
          ("rowIndices" -> rowIndices.toSeq) ~
          ("values" -> values.toSeq) ~
          ("isTransposed" -> isTransposed)
        compact(render(jValue))
      case DenseMatrix(numRows, numCols, values, isTransposed) =>
        val jValue = ("class" -> className) ~
          ("type" -> 1) ~
          ("numRows" -> numRows) ~
          ("numCols" -> numCols) ~
          ("values" -> values.toSeq) ~
          ("isTransposed" -> isTransposed)
        compact(render(jValue))
    }
  }
} 
Example 119
Source File: NsdbNodeEndpoint.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.cluster

import akka.actor.{ActorRef, ActorSystem}
import akka.event.{Logging, LoggingAdapter}
import com.typesafe.config.Config
import io.radicalbit.nsdb.cluster.endpoint.GrpcEndpoint
import io.radicalbit.nsdb.security.NsdbSecurity
import io.radicalbit.nsdb.web.{BitSerializer, CustomSerializers, WebResources}
import org.json4s.{DefaultFormats, Formats}


class NsdbNodeEndpoint(readCoordinator: ActorRef,
                       writeCoordinator: ActorRef,
                       metadataCoordinator: ActorRef,
                       publisher: ActorRef)(override implicit val system: ActorSystem)
    extends WebResources
    with NsdbSecurity {

  override val config: Config = system.settings.config

  override implicit val logger: LoggingAdapter = Logging.getLogger(system, this)

  new GrpcEndpoint(readCoordinator = readCoordinator,
                   writeCoordinator = writeCoordinator,
                   metadataCoordinator = metadataCoordinator)

  implicit val formats: Formats = DefaultFormats ++ CustomSerializers.customSerializers + BitSerializer

  initWebEndpoint(writeCoordinator, readCoordinator, metadataCoordinator, publisher)

} 
Example 120
Source File: GraphiteMetricSpec.scala    From slab   with Apache License 2.0 5 votes vote down vote up
package com.criteo.slab.lib

import com.criteo.slab.lib.graphite.{DataPoint, GraphiteMetric}
import com.criteo.slab.utils.Jsonable
import org.json4s.DefaultFormats
import org.scalatest.{FlatSpec, Matchers}

import scala.util.Success

class GraphiteMetricSpec extends FlatSpec with Matchers {
  "JSON serializer" should "be able to read json" in {
    val json = """[{"target":"metric.one", "datapoints":[[1.0, 2000], [null, 2060]]}]""".stripMargin.replace("\n", "")
    val formats = DefaultFormats ++ Jsonable[GraphiteMetric].serializers
    val r = Jsonable.parse[List[GraphiteMetric]](json, formats)
    r shouldEqual Success(List(GraphiteMetric("metric.one", List(
      DataPoint(Some(1.0), 2000),
      DataPoint(None, 2060)
    ))))
  }
} 
Example 121
Source File: Visualization.scala    From lightning-scala   with MIT License 5 votes vote down vote up
package org.viz.lightning

import org.json4s.DefaultFormats
import org.json4s.native.Serialization
import scala.language.dynamics
import scalaj.http._

class Visualization(val lgn: Lightning, val id: String, val name: String) {

  if (lgn.isNotebook) {
    //implicit val HTMLViz = org.refptr.iscala.display.HTMLDisplay[Visualization] { viz =>
    //  viz.getHTML
    //}
    //org.refptr.iscala.display.display_html(this)
  }

  def formatURL(url: String): String = {
    val out = url.last.toString match {
      case "/" => url
      case _ => url + "/"
    }
    out + "?host=" + lgn.host
  }

  def getPermalinkURL: String = {
    lgn.host + "/visualizations/" + id
  }

  def getEmbedLink: String = {
    formatURL(this.getPermalinkURL + "/embed")
  }

  def getIframeLink: String = {
    formatURL(this.getPermalinkURL + "/iframe")
  }

  def getPymLink: String = {
    formatURL(this.getPermalinkURL + "/pym")
  }

  def getDataLink: String = {
    formatURL(lgn.host + "/sessions/" + lgn.session + "/visualizations/" + id + "/data/")
  }

  def getHTML: String = {
    val url = getEmbedLink
    var request = Http(url).method("GET")
    if (lgn.auth.nonEmpty) {
      request = request.auth(lgn.auth.get._1, lgn.auth.get._2)
    }
    request.asString.body
  }

  def append(payload: Map[String, Any]) : Visualization = {
    val url = lgn.host + "/sessions/" + lgn.session + "/visualizations/" + this.id + "/data/"
    implicit val formats = DefaultFormats
    val blob = Map("data" -> payload)
    lgn.post(url, Serialization.write(blob))
    this
  }
	
  def getPublicLink: String = {
    this.getPermalinkURL + "/public/"
  }
  
} 
Example 122
Source File: StoryJSONExtractor.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.story

import java.io._
import java.util.Date

import io.gzet.story.util.Tokenizer
import org.apache.spark.{Logging, SparkConf, SparkContext}
import org.elasticsearch.spark._
import org.json4s.DefaultFormats
import org.json4s.native.JsonMethods._

import scala.util.Try

object StoryJSONExtractor extends SimpleConfig with Logging {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setAppName("Story Extractor")
    val sc = new SparkContext(sparkConf)

    val outputDir = args.head
    val minWeight = Try(args.last.toInt).getOrElse(0)

    val nodes = sc.esJsonRDD(esNodesResource).map({ case (_, strJson) =>
      implicit val format = DefaultFormats
      val json = parse(strJson)
      val title = (json \ "title").extractOrElse[String]("")
      val gid = (json \ "gid").extractOrElse[Int](-1)
      val articles = (json \ "articles").extractOrElse[Int](-1)
      val cid = (json \ "cid").extractOrElse[Int](-1)
      val date = (json \ "date").extractOrElse[Long](0L)
      Array(cid, gid, new Date(date).toString, articles, Tokenizer.lucene(title.replaceAll("\\n", "").replaceAll("\\r", "")).mkString(" ")).mkString(",")
    }).collect()

    val nodesMap = sc.broadcast(sc.esJsonRDD(esNodesResource).map({ case (_, strJson) =>
      implicit val format = DefaultFormats
      val json = parse(strJson)
      val gid = (json \ "gid").extractOrElse[Int](-1)
      val cid = (json \ "cid").extractOrElse[Int](-1)
      (cid, gid)
    }).collectAsMap())

    val edges = sc.esJsonRDD(esEdgesResource).map({ case (_, strJson) =>
      implicit val format = DefaultFormats
      val json = parse(strJson)
      val source = (json \ "source").extractOrElse[Int](-1)
      val target = (json \ "target").extractOrElse[Int](-1)
      val weight = (json \ "weight").extractOrElse[Int](-1)
      (source, target, weight)
    }).filter(_._3 > minWeight).map({ case (source, target, weight) =>
      val mutation = nodesMap.value.getOrElse(source, -1) != nodesMap.value.getOrElse(target, -1)
      Array(source, target, weight, mutation).mkString(",")
    }).collect()

    printToFile(new File(s"$outputDir/nodes")) { p =>
      p.println("id,story,date,articles,label")
      nodes.foreach(p.println)
    }

    printToFile(new File(s"$outputDir/edges")) { p =>
      p.println("source,target,weight,mutation")
      edges.foreach(p.println)
    }
  }

  def printToFile(f: java.io.File)(op: java.io.PrintWriter => Unit) {
    val p = new java.io.PrintWriter(f)
    try {
      op(p)
    } finally {
      p.close()
    }
  }
} 
Example 123
Source File: ESReader.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.community.elasticsearch

import io.gzet.community.util.GzetPersons
import org.apache.spark.SparkContext
import org.elasticsearch.spark._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

class ESReader(config: ESConfig) extends Serializable {

  def loadPersons(sc: SparkContext, esField: String, query: String = "?q=*") = {
    val jsonRdd = sc.esJsonRDD(config.esIndex, query).values
    jsonRdd map { jsonString =>
      implicit val format = DefaultFormats
      val json = parse(jsonString)
      (json \ esField).extract[Array[String]]
    } flatMap GzetPersons.buildTuples
  }

} 
Example 124
Source File: KubernetesDeploymentRequest.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.container_driver.kubernetes

import io.vamp.container_driver.Docker
import org.json4s.DefaultFormats
import org.json4s.native.Serialization._

case class KubernetesDeploymentRequest(
    name:       String,
    docker:     Docker,
    replicas:   Int,
    cpu:        Double,
    mem:        Int,
    privileged: Boolean,
    env:        Map[String, String],
    cmd:        List[String],
    args:       List[String],
    labels:     Map[String, String],
    dialect:    Map[String, Any]    = Map()
) extends KubernetesArtifact {

  override def toString: String = {

    val container: Map[String, Any] = Map[String, Any](
      "image" → docker.image,
      "name" → name,
      "env" → env.map({ case (n, v) ⇒ Map[String, Any]("name" → n, "value" → v) }),
      "ports" → docker.portMappings.map(pm ⇒ Map[String, Any](
        "name" → s"p${pm.containerPort}", "containerPort" → pm.containerPort, "protocol" → pm.protocol.toUpperCase
      )),
      "args" → args,
      "command" → cmd,
      "resources" → Map[String, Any](
        "requests" → Map[String, Any](
          "cpu" → cpu,
          "memory" → s"${mem}M"
        )
      ),
      "securityContext" → Map[String, Any]("privileged" → privileged)
    )

    val containerDialect: Map[String, Any] = (dialect.getOrElse("containers", List()) match {
      case l: List[_] ⇒ l.headOption.getOrElse(Map()).asInstanceOf[Map[String, Any]]
      case _          ⇒ Map[String, Any]()
    }).filterNot { case (k, _) ⇒ container.contains(k) }

    val deployment = Map(
      "apiVersion" → "extensions/v1beta1",
      "kind" → "Deployment",
      "metadata" → Map("name" → name),
      "spec" → Map(
        "replicas" → replicas,
        "template" → Map(
          "metadata" → labels2map(labels),
          "spec" → (
            dialect ++ Map(
              "containers" → List(
                containerDialect ++ container
              )
            )
          )
        )
      )
    )

    write(deployment)(DefaultFormats)
  }
} 
Example 125
Source File: Parsing.scala    From meetup-stream   with Apache License 2.0 5 votes vote down vote up
package util

import core._
import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s._
import org.json4s.native.JsonMethods._
import org.joda.time.DateTime
import org.apache.spark.Partitioner
import org.apache.spark.streaming.Seconds
import scala.util.Try

object Parsing {
  
  
  @transient implicit val formats = DefaultFormats
  
  def parseEvent(eventJson: String):Option[Event]={
    Try({
      val json=parse(eventJson).camelizeKeys
      val event=json.extract[Event]
      event      
    }).toOption
  }
  
  def parseRsvp(rsvpJson: String)={
    Try({
      val json=parse(rsvpJson).camelizeKeys
      val member=(json \ "member").extract[Member]
      val event=(json \ "event").extract[MemberEvent]
      val response=(json \ "response").extract[String]
      (member, event, response)
    }).toOption
  }
             
} 
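The camelizeKeys call is what lets snake_case JSON keys line up with camelCase case-class fields; a minimal standalone sketch of that step (Venue is a hypothetical case class, not part of the project):

import org.json4s._
import org.json4s.native.JsonMethods._

case class Venue(venueName: String)

implicit val formats = DefaultFormats
val venue = parse("""{"venue_name": "HQ"}""").camelizeKeys.extract[Venue]   // Venue("HQ")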
Example 126
Source File: SparkFeaturePFASuiteBase.scala    From aardpfark   with Apache License 2.0 5 votes vote down vote up
package com.ibm.aardpfark.pfa

import com.opendatagroup.hadrian.jvmcompiler.PFAEngine
import org.json4s.DefaultFormats

import org.apache.spark.ml.{PipelineModel, Transformer}
import org.apache.spark.sql.types.StructType


abstract class SparkPipelinePFASuiteBase[A <: Result](implicit m: Manifest[A])
  extends SparkPredictorPFASuiteBase[A] {
  import com.ibm.aardpfark.spark.ml.SparkSupport._

  protected val schema: StructType

  override protected def transformerToPFA(t: Transformer, pretty: Boolean): String = {
    toPFA(t.asInstanceOf[PipelineModel], schema, pretty)
  }
}

abstract class SparkFeaturePFASuiteBase[A <: Result](implicit m: Manifest[A])
  extends SparkPFASuiteBase {

  implicit val formats = DefaultFormats

  protected var isDebug = false

  import com.ibm.aardpfark.spark.ml.SparkSupport._
  import org.json4s._
  import org.json4s.native.JsonMethods._

  test("PFA transformer produces the same results as Spark transformer") {
    parityTest(sparkTransformer, input, expectedOutput)
  }

  protected def transformerToPFA(t: Transformer, pretty: Boolean): String = {
    toPFA(t, pretty)
  }

  protected def testInputVsExpected(
      engine: PFAEngine[AnyRef, AnyRef],
      input: Array[String],
      expectedOutput: Array[String]) = {
    import ApproxEquality._
    input.zip(expectedOutput).foreach { case (in, out) =>
      val pfaResult = engine.action(engine.jsonInput(in))
      val actual = parse(pfaResult.toString).extract[A]
      val expected = parse(out).extract[A]
      (actual, expected) match {
        case (a: ScalerResult, e: ScalerResult) => assert(a.scaled === e.scaled)
        case (a: Result, e: Result) => assert(a === e)
      }
    }
  }

  def parityTest(
      sparkTransformer: Transformer,
      input: Array[String],
      expectedOutput: Array[String]): Unit = {
    val PFAJson = transformerToPFA(sparkTransformer, pretty = true)
    if (isDebug) {
      println(PFAJson)
    }
    val engine = getPFAEngine(PFAJson)
    testInputVsExpected(engine, input, expectedOutput)
  }
}

case class ScalerResult(scaled: Seq[Double]) extends Result 
Example 127
Source File: KafkaFlowExample.scala    From kafka-scala-api   with Apache License 2.0 5 votes vote down vote up
package com.example.flow

import org.apache.spark.streaming.dstream.DStream._
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._

import scala.util.Try

case class Purchase(item_id: String, amount: BigDecimal, time: Long)
case class Key(item_id: String, time: DateTime)
case class Summary(item_id: String, time: DateTime, total: BigDecimal)

object KafkaFlowExample {
  implicit val formats = DefaultFormats

  def extract(message: String): Option[(Key, BigDecimal)] = {
    for {
      parsed <- Try(parse(message)).toOption
      purchase <- parsed.extractOpt[Purchase]
    } yield {
      val datetime = new DateTime(purchase.time)
      val roundedTime = datetime.withMinuteOfHour(0).withSecondOfMinute(0).withMillisOfSecond(0)
      Key(purchase.item_id, roundedTime) -> purchase.amount
    }
  }

  def transformStream(stream: InputDStream[String]): DStream[Summary] = {
    stream
      .flatMap(extract)
      .reduceByKey(_ + _)
      .map { case (key, amount) =>
        Summary(key.item_id, key.time, amount)
      }
  }
} 
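A usage sketch of the extractor above with a made-up message (the timestamp is truncated to the hour in the JVM's default time zone):

import com.example.flow.KafkaFlowExample

val message = """{"item_id": "sku-1", "amount": 9.99, "time": 1454313600000}"""
KafkaFlowExample.extract(message)
// Some((Key("sku-1", <hour-truncated DateTime>), 9.99))

KafkaFlowExample.extract("not json")
// None, because the Try around parse absorbs the parse failure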
Example 128
Source File: JsonVectorConverter.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse => parseJson, render}

object JsonVectorConverter {

  /**
    * Parses the JSON representation of a vector into a [[Vector]].
    */
  def fromJson(json: String): Vector = {
    implicit val formats = DefaultFormats
    val jValue = parseJson(json)
    (jValue \ "type").extract[Int] match {
      case 0 => // sparse
        val size = (jValue \ "size").extract[Int]
        val indices = (jValue \ "indices").extract[Seq[Int]].toArray
        val values = (jValue \ "values").extract[Seq[Double]].toArray
        Vectors.sparse(size, indices, values)
      case 1 => // dense
        val values = (jValue \ "values").extract[Seq[Double]].toArray
        Vectors.dense(values)
      case _ =>
        throw new IllegalArgumentException(s"Cannot parse $json into a vector.")
    }
  }

  /**
    * Converts the vector to a JSON string.
    */
  def toJson(v: Vector): String = {
    v match {
      case IntSparseVector(size, indices, values) =>
        val jValue = ("type" -> 0) ~
          ("size" -> size) ~
          ("indices" -> indices.toSeq) ~
          ("values" -> values.toSeq)
        compact(render(jValue))
      case DenseVector(values) =>
        val jValue = ("type" -> 1) ~ ("values" -> values.toSeq)
        compact(render(jValue))
    }
  }
} 
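Since this variant has both directions, a round-trip sketch (assuming the sona Vectors factory used in fromJson above, within the same package):

val dense = Vectors.dense(0.5, 1.5)
val restored = JsonVectorConverter.fromJson(JsonVectorConverter.toJson(dense))
// restored is a DenseVector equal to the original; sparse vectors round-trip the same way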
Example 129
Source File: JsonMatrixConverter.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.linalg

import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, render, parse => parseJson}

object JsonMatrixConverter {

  
  val className = "matrix"

  /**
    * Parses the JSON representation of a Matrix into a [[Matrix]].
    */
  def fromJson(json: String): Matrix = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    val jValue = parseJson(json)
    (jValue \ "type").extract[Int] match {
      case 0 => // sparse
        val numRows = (jValue \ "numRows").extract[Int]
        val numCols = (jValue \ "numCols").extract[Int]
        val colPtrs = (jValue \ "colPtrs").extract[Seq[Int]].toArray
        val rowIndices = (jValue \ "rowIndices").extract[Seq[Int]].toArray
        val values = (jValue \ "values").extract[Seq[Double]].toArray
        val isTransposed = (jValue \ "isTransposed").extract[Boolean]
        new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values, isTransposed)
      case 1 => // dense
        val numRows = (jValue \ "numRows").extract[Int]
        val numCols = (jValue \ "numCols").extract[Int]
        val values = (jValue \ "values").extract[Seq[Double]].toArray
        val isTransposed = (jValue \ "isTransposed").extract[Boolean]
        new DenseMatrix(numRows, numCols, values, isTransposed)
      case _ =>
        throw new IllegalArgumentException(s"Cannot parse $json into a Matrix.")
    }
  }

  /**
    * Converts the Matrix to a JSON string.
    */
  def toJson(m: Matrix): String = {
    m match {
      case SparseMatrix(numRows, numCols, colPtrs, rowIndices, values, isTransposed) =>
        val jValue = ("class" -> className) ~
          ("type" -> 0) ~
          ("numRows" -> numRows) ~
          ("numCols" -> numCols) ~
          ("colPtrs" -> colPtrs.toSeq) ~
          ("rowIndices" -> rowIndices.toSeq) ~
          ("values" -> values.toSeq) ~
          ("isTransposed" -> isTransposed)
        compact(render(jValue))
      case DenseMatrix(numRows, numCols, values, isTransposed) =>
        val jValue = ("class" -> className) ~
          ("type" -> 1) ~
          ("numRows" -> numRows) ~
          ("numCols" -> numCols) ~
          ("values" -> values.toSeq) ~
          ("isTransposed" -> isTransposed)
        compact(render(jValue))
    }
  }
} 
Example 130
Source File: JObjectParam.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package com.tencent.angel.sona.ml.param
import com.tencent.angel.sona.ml.util.Identifiable
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, parse, render}

class JObjectParam(parent: String, name: String, doc: String, isValid: JObject => Boolean)
  extends Param[JObject](parent, name, doc, isValid) {
  def this(parent: String, name: String, doc: String) =
    this(parent, name, doc, (value: JObject) => value != null)

  def this(parent: Identifiable, name: String, doc: String, isValid: JObject => Boolean) =
    this(parent.uid, name, doc, isValid)

  def this(parent: Identifiable, name: String, doc: String) = this(parent.uid, name, doc)

  override def w(value: JObject): ParamPair[JObject] = super.w(value)

  override def jsonEncode(value: JObject): String = {
    compact(render(value))
  }

  override def jsonDecode(json: String): JObject = {
    implicit val formats: DefaultFormats = DefaultFormats
    parse(json).asInstanceOf[JObject]
  }
} 
Example 131
Source File: ExampleApp.scala    From akka-http-json   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.akkahttpjson4s

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import org.json4s.{ DefaultFormats, jackson }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import Directives._
    import Json4sSupport._

    implicit val serialization = jackson.Serialization // or native.Serialization
    implicit val formats       = DefaultFormats

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) {
            response => complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
} 
Example 132
Source File: JsonConversions.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.api

import java.lang.reflect.InvocationTargetException

import org.json4s.{MappingException, DefaultFormats, Formats}
import spray.http.{HttpCharsets, HttpEntity, MediaTypes}
import spray.httpx.Json4sJacksonSupport
import spray.httpx.unmarshalling.Unmarshaller


object JsonConversions extends Json4sJacksonSupport {
	implicit def json4sJacksonFormats: Formats = DefaultFormats

	implicit def jsonApiUnmarshaller[T: Manifest] =
		Unmarshaller[T](MediaTypes.`application/json`) {
      case x: HttpEntity.NonEmpty ⇒
        try serialization.read[T](x.asString(defaultCharset = HttpCharsets.`UTF-8`))
        catch {
          case MappingException("unknown error", ite: InvocationTargetException) ⇒ throw ite.getCause
        }
    }
} 
Example 133
Source File: Inputs.scala    From flamy   with Apache License 2.0 5 votes vote down vote up
package com.flaminem.flamy.model

import org.json4s.{DefaultFormats, _}
import org.json4s.native.JsonMethods._


// NOTE: this excerpt omits the `Inputs` class itself; the members below sit in its companion object.
object Inputs {

  def readFromString[T: Manifest](jsonData: String, subPath: String*) (implicit formats: Formats = DefaultFormats): T = {
    val fullAST = parse(jsonData)
    val jsonAST = subPath.foldLeft(fullAST){case (ast, subField) => ast\subField}
    jsonAST.extract[T]
  }

  def fromJson(json: String): Inputs = {
    readFromString[Inputs](json)
  }

  object NoInputs extends Inputs(Set(), Set())
} 
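A usage sketch of readFromString with a hypothetical payload, showing how subPath walks into nested JSON before extraction:

val json = """{"config": {"tables": ["db.table1", "db.table2"]}}"""

// Equivalent to (parse(json) \ "config" \ "tables").extract[Seq[String]]
val tables = Inputs.readFromString[Seq[String]](json, "config", "tables")
// Seq("db.table1", "db.table2")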
Example 134
Source File: BillerCache.scala    From apple-of-my-iap   with MIT License 5 votes vote down vote up
package com.meetup.iap

import com.meetup.iap.receipt.Subscription
import org.slf4j.LoggerFactory

import java.io.File
import scala.io.Source

import org.json4s.DefaultFormats
import org.json4s.native.Serialization.{read, writePretty}
import org.apache.commons.io.FileUtils


object BillerCache {
  val log = LoggerFactory.getLogger(BillerCache.getClass)

  implicit val formats = DefaultFormats

  private val ProjectName = "iap-service"
  private val inProject = new File(".").getCanonicalPath.endsWith(ProjectName)

  private val Folder = {
    val base = if(inProject) "" else "iap-service/"
    new File(s"${base}tmp/")
  }
  if(!Folder.exists) {
    Folder.mkdirs
  }

  private val TempFile = new File(Folder, "subscriptions.json")
  if(!TempFile.exists) {
    TempFile.createNewFile
  }

  private val PlansFile = new File(Folder, "plans.json")
  if (!PlansFile.exists) {
    PlansFile.createNewFile
  }

  def readFromCache(): Map[String, Subscription] = {
    log.info("Reading from file: " + TempFile.getAbsolutePath)
    val raw = Source.fromFile(TempFile).mkString.trim

    if(raw.nonEmpty) {
        Map(read[Map[String, Subscription]](raw).toSeq: _*)
    } else Map.empty
  }

  def writeToCache(subs: Map[String, Subscription]) {
      val json = writePretty(subs)
      FileUtils.writeStringToFile(TempFile, json, "UTF-8")
  }

  def readPlansFromFile(): List[Plan] = {
    log.info(s"Reading from plans file: ${PlansFile.getAbsolutePath}")
    val raw = Source.fromFile(PlansFile).mkString.trim

    if(raw.nonEmpty) {
      log.info("Found some plans")
      List(read[List[Plan]](raw).toSeq: _*)
    } else List.empty
  }
} 
Example 135
Source File: JsonUpgradeConfigurationTest.scala    From RTran   with Apache License 2.0 5 votes vote down vote up
package com.ebay.rtran.core

import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import org.scalatest.{FlatSpecLike, Matchers}


class JsonUpgradeConfigurationTest extends FlatSpecLike with Matchers {

  implicit val formats = DefaultFormats

  "UpgradeConfiguration" should "be constructed from JSON string with projectType and rules defined" in {
    val jsonString =
      """
        |{
        | "ruleConfigs":[
        |   {"name":"MyModifyFileRule"},
        |   {"name":"MyRenameFileRule"}
        | ]
        |}
      """.stripMargin
    val configuration = parse(jsonString).extract[JsonUpgradeConfiguration]
    configuration.ruleConfigs should be (List(
      JsonRuleConfiguration("MyModifyFileRule", None),
      JsonRuleConfiguration("MyRenameFileRule", None)
    ))
  }

  "UpgradeConfiguration" should "be constructed from JSON string with all fields defined" in {
    val jsonString =
      """
        |{
        | "ruleConfigs":[
        |   {
        |     "name":"MyModifyFileRule",
        |     "config":{
        |       "key1":"value1"
        |     }
        |   },
        |   {
        |     "name":"MyRenameFileRule",
        |     "config":[
        |       "value1",
        |       "value2"
        |     ]
        |   },
        |   {
        |     "name":"MyRenameFileRule",
        |     "config":{
        |       "key3":{
        |         "key1":"value1"
        |       },
        |       "key4":"value4"
        |     }
        |   }
        | ]
        |}
      """.stripMargin
    val configuration = parse(jsonString).extract[JsonUpgradeConfiguration]
    configuration.ruleConfigs.size should be (3)
    configuration.ruleConfigs foreach (_.config should not be None)
  }

} 
Example 136
Source File: HostTimeSpan.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.timespan

import com.qubole.sparklens.common.AggregateMetrics
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.TaskInfo
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue

import scala.collection.mutable


class HostTimeSpan(val hostID: String) extends TimeSpan {
  var hostMetrics = new AggregateMetrics()


  override def duration():Option[Long] = {
    Some(super.duration().getOrElse(System.currentTimeMillis() - startTime))
  }

  def updateAggregateTaskMetrics (taskMetrics: TaskMetrics, taskInfo: TaskInfo): Unit = {
    hostMetrics.update(taskMetrics, taskInfo)
  }
  override def getMap(): Map[String, _ <: Any] = {
    implicit val formats = DefaultFormats
    Map("hostID" -> hostID, "hostMetrics" -> hostMetrics.getMap) ++ super.getStartEndTime()
  }

}

object HostTimeSpan {
  def getTimeSpan(json: Map[String, JValue]): mutable.HashMap[String, HostTimeSpan] = {
    implicit val formats = DefaultFormats
    val map = new mutable.HashMap[String, HostTimeSpan]

    json.keys.map(key => {
      val value = json.get(key).get
      val timeSpan = new HostTimeSpan((value \ "hostID").extract[String])
      timeSpan.hostMetrics = AggregateMetrics.getAggregateMetrics((value \ "hostMetrics")
        .extract[JValue])
      timeSpan.addStartEnd(value)
      map.put(key, timeSpan)
    })

    map
  }
} 
Example 137
Source File: TimeSpan.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.timespan

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue


trait TimeSpan  {
  var startTime: Long = 0
  var endTime: Long = 0

  def setEndTime(time: Long): Unit = {
    endTime = time
  }

  def setStartTime(time: Long): Unit = {
    startTime = time
  }
  def isFinished(): Boolean = (endTime != 0 && startTime != 0)

  def duration(): Option[Long] = {
    if (isFinished()) {
      Some(endTime - startTime)
    } else {
      None
    }
  }
  def getMap(): Map[String, _ <: Any]

  def getStartEndTime(): Map[String, Long] = {
    Map("startTime" -> startTime, "endTime" -> endTime)
  }

  def addStartEnd(json: JValue): Unit = {
    implicit val formats = DefaultFormats
    this.startTime = (json \ "startTime").extract[Long]
    this.endTime = (json \ "endTime").extract[Long]
  }
} 
Example 138
Source File: JobTimeSpan.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.timespan

import com.qubole.sparklens.common.{AggregateMetrics, AppContext}
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.TaskInfo
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue

import scala.collection.{immutable, mutable}


// Class signature and fields reconstructed from the companion object below
// (the excerpt omits them).
class JobTimeSpan(val jobID: Long) extends TimeSpan {
  var jobMetrics = new AggregateMetrics()
  var stageMap = new mutable.HashMap[Int, StageTimeSpan]

  private def criticalTime(stageID: Int, data: mutable.HashMap[Int, (Seq[Int], Long)]): Long = {
    // Default to no parent stages and 0 time for stage IDs missing from the map
    val stageData = data.getOrElse(stageID, (List.empty[Int], 0L))
    stageData._2 + {
      if (stageData._1.isEmpty) {
        0L
      } else {
        stageData._1.map(x => criticalTime(x, data)).max
      }
    }
  }

  override def getMap(): Map[String, _ <: Any] = {
    implicit val formats = DefaultFormats

    Map(
      "jobID" -> jobID,
      "jobMetrics" -> jobMetrics.getMap,
      "stageMap" -> AppContext.getMap(stageMap)) ++ super.getStartEndTime()
  }
}

object JobTimeSpan {
  def getTimeSpan(json: Map[String, JValue]): mutable.HashMap[Long, JobTimeSpan] = {
    implicit val formats = DefaultFormats
    val map = new mutable.HashMap[Long, JobTimeSpan]

    json.keys.map(key => {
      val value = json.get(key).get.extract[JValue]
      val timeSpan = new JobTimeSpan((value \ "jobID").extract[Long])

      timeSpan.jobMetrics = AggregateMetrics.getAggregateMetrics((value \ "jobMetrics")
              .extract[JValue])
      timeSpan.stageMap = StageTimeSpan.getTimeSpan((value \ "stageMap").extract[
        immutable.Map[String, JValue]])
      timeSpan.addStartEnd(value)
      map.put(key.toLong, timeSpan)

    })
    map
  }
} 
Example 139
Source File: ExecutorTimeSpan.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.timespan

import com.qubole.sparklens.common.AggregateMetrics
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.TaskInfo
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue

import scala.collection.mutable

class ExecutorTimeSpan(val executorID: String,
                       val hostID: String,
                       val cores: Int) extends TimeSpan {
  var executorMetrics = new AggregateMetrics()

  def updateAggregateTaskMetrics (taskMetrics: TaskMetrics, taskInfo: TaskInfo): Unit = {
    executorMetrics.update(taskMetrics, taskInfo)
  }

  override def getMap(): Map[String, _ <: Any] = {
    implicit val formats = DefaultFormats

    Map("executorID" -> executorID, "hostID" -> hostID, "cores" -> cores, "executorMetrics" ->
      executorMetrics.getMap()) ++ super.getStartEndTime()
  }
}

object ExecutorTimeSpan {
  def getTimeSpan(json: Map[String, JValue]): mutable.HashMap[String, ExecutorTimeSpan] = {

    implicit val formats = DefaultFormats
    val map = new mutable.HashMap[String, ExecutorTimeSpan]

    json.keys.map(key => {
      val value = json.get(key).get
      val timeSpan = new ExecutorTimeSpan(
        (value \ "executorID").extract[String],
        (value \ "hostID").extract[String],
        (value \ "cores").extract[Int]
      )
      timeSpan.executorMetrics = AggregateMetrics.getAggregateMetrics((value
              \ "executorMetrics").extract[JValue])
      timeSpan.addStartEnd(value)
      map.put(key, timeSpan)
    })
    map
  }
} 
Example 140
Source File: EventHistoryReporter.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.app

import java.io.{BufferedInputStream, InputStream}
import java.net.URI

import com.ning.compress.lzf.LZFInputStream
import com.qubole.sparklens.QuboleJobListener
import com.qubole.sparklens.common.Json4sWrapper
import com.qubole.sparklens.helper.HDFSConfigHelper
import net.jpountz.lz4.LZ4BlockInputStream
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.json4s.DefaultFormats
import org.xerial.snappy.SnappyInputStream


class EventHistoryReporter(file: String, extraConf: List[(String, String)] = List.empty) {

  // ReplayListenerBus is spark-internal, so it is instantiated and driven via
  // reflection; its replay API differs between Spark 2.0 and 2.1+.
  val busKlass = Class.forName("org.apache.spark.scheduler.ReplayListenerBus")
  val bus = busKlass.newInstance()
  val addListenerMethod = busKlass.getMethod("addListener", classOf[Object])
  val conf = new SparkConf()
    .set("spark.sparklens.reporting.disabled", "false")
    .set("spark.sparklens.save.data", "false")

  extraConf.foreach(x => {
    conf.set(x._1, x._2)
  })

  val listener = new QuboleJobListener(conf)
  addListenerMethod.invoke(bus, listener)


  try {
    val replayMethod = busKlass.getMethod("replay", classOf[InputStream], classOf[String],
      classOf[Boolean])
    replayMethod.invoke(bus, getDecodedInputStream(file, conf), file, boolean2Boolean(false))
  } catch {
    case _: NoSuchMethodException => // Spark binaries are 2.1.x or above
      val replayMethod = busKlass.getMethod("replay", classOf[InputStream], classOf[String],
        classOf[Boolean], classOf[String => Boolean])
      replayMethod.invoke(bus, getDecodedInputStream(file, conf), file, boolean2Boolean(false),
        getFilter _)
    case x: Exception => {
     println(s"Failed replaying events from ${file} [${x.getMessage}]")
    }
  }


  // Borrowed from CompressionCodecs in spark
  private def getDecodedInputStream(file: String, conf: SparkConf): InputStream = {

    val fs = FileSystem.get(new URI(file), HDFSConfigHelper.getHadoopConf(Some(conf)))
    val path = new Path(file)
    val bufStream = new BufferedInputStream(fs.open(path))

    val logName = path.getName.stripSuffix(".inprogress")
    val codecName: Option[String] = logName.split("\\.").tail.lastOption

    codecName.getOrElse("") match {
      case "lz4" => new LZ4BlockInputStream(bufStream)
      case "lzf" => new LZFInputStream(bufStream)
      case "snappy" => new SnappyInputStream(bufStream)
      case _ => bufStream
    }
  }

  private def getFilter(eventString: String): Boolean = {
    implicit val formats = DefaultFormats
    eventFilter.contains(Json4sWrapper.parse(eventString).extract[Map[String, Any]].get("Event")
      .get.asInstanceOf[String])
  }

  private def eventFilter: Set[String] = {
    Set(
      "SparkListenerTaskEnd",
      "SparkListenerApplicationStart",
      "SparkListenerApplicationEnd",
      "SparkListenerExecutorAdded",
      "SparkListenerExecutorRemoved",
      "SparkListenerJobStart",
      "SparkListenerJobEnd",
      "SparkListenerStageSubmitted",
      "SparkListenerStageCompleted"
    )
  }

} 
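
A minimal invocation sketch for the reporter above (not part of the sparklens sources; the event-log path is illustrative, and the extra conf entry simply overrides one of the defaults set in the constructor):

// Hypothetical usage; the path below is illustrative.
new EventHistoryReporter(
  file = "hdfs:///spark-history/application_1234_0001.lz4",
  extraConf = List(("spark.sparklens.save.data", "true"))
)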
Example 141
Source File: ApplicationInfo.scala    From sparklens   with Apache License 2.0 5 votes vote down vote up
package com.qubole.sparklens.common

import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue

case class ApplicationInfo (var applicationID:String = "NA",
                            var startTime:Long = 0L,
                            var endTime:Long = 0L) {

  def getMap(): Map[String, Any] = {
    implicit val formats = DefaultFormats
    Map("applicationID" -> applicationID, "startTime" -> startTime, "endTime" -> endTime)
  }
}

object ApplicationInfo {

  def getObject(jvalue: JValue): ApplicationInfo = {
    implicit val formats = DefaultFormats

    ApplicationInfo(
      (jvalue \ "applicationID").extract[String],
      (jvalue \ "startTime").extract[Long],
      (jvalue \ "endTime").extract[Long])
  }
} 
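
As a quick illustration of how getMap and getObject pair up under DefaultFormats, here is a hedged round-trip sketch. It is not part of sparklens, and the jackson JsonMethods/Serialization imports are assumptions (the project itself has a Json4sWrapper parsing helper, as seen in Example 140):

import org.json4s.DefaultFormats
import org.json4s.jackson.{JsonMethods, Serialization}

object ApplicationInfoRoundTripSketch extends App {
  implicit val formats = DefaultFormats
  val info = ApplicationInfo("app-123", startTime = 1L, endTime = 42L)
  // Serializes the map view, e.g. {"applicationID":"app-123","startTime":1,"endTime":42}
  val json = Serialization.write(info.getMap())
  val restored = ApplicationInfo.getObject(JsonMethods.parse(json))
  assert(restored == info)
}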
Example 142
Source File: JsonBodyForError.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.seahorse.datasource.model

import org.json4s.DefaultFormats

object JsonBodyForError {

  import org.json4s.jackson.Serialization.write
  implicit val formats = DefaultFormats

  def apply(errorCode: Int, message: String): String = {
    val error = Error(
      code = errorCode,
      message = message
    )
    write(error)
  }

} 
Example 143
Source File: JsonBodyForError.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.seahorse.scheduling.model

import org.json4s.DefaultFormats

object JsonBodyForError {

  import org.json4s.jackson.Serialization.write
  implicit val formats = DefaultFormats

  def apply(errorCode: Int, message: String): String = {
    val error = Error(
      code = errorCode,
      message = message
    )
    write(error)
  }

} 
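
A hypothetical call to either of the two identical helpers above. Error is the module's own case class (its code and message fields are assumed from the named arguments), so the rendered body should look roughly like the comment:

// body == {"code":404,"message":"Datasource not found"}  (field names assumed from Error)
val body = JsonBodyForError(404, "Datasource not found")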
Example 144
Source File: UpgradeConfiguration.scala    From RTran   with Apache License 2.0 5 votes vote down vote up
package com.ebay.rtran.core

import com.fasterxml.jackson.databind.JsonNode
import com.typesafe.scalalogging.LazyLogging
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods._
import com.ebay.rtran.api.{IModel, IRule, IRuleConfigFactory}
import org.json4s.DefaultFormats

import scala.util.{Failure, Success, Try}


trait RuleProducer {
  val ruleInstances: List[_ <: IRule[_ <: IModel]]
}

trait UpgradeConfiguration extends RuleProducer {
  val ruleConfigs: List[JsonRuleConfiguration]
}

case class JsonRuleConfiguration(name: String, metadata: Option[JValue] = None, config: Option[JValue] = None)

case class JsonUpgradeConfiguration(ruleConfigs: List[JsonRuleConfiguration])
  extends UpgradeConfiguration with JsonRuleProducer

trait JsonRuleProducer extends RuleProducer with LazyLogging {self: UpgradeConfiguration =>

  lazy val ruleInstances = ruleConfigs map {
    case JsonRuleConfiguration(name, metadata, configOpt) =>
      logger.info("Creating instance for {} with config {}", name, configOpt)
      implicit val formats = DefaultFormats

      // Copy settings from metadata into the RuleRegistry
      RuleRegistry.findRuleDefinition(name) flatMap { case (ruleClass, rule) =>
        val properties = metadata.map(json => json.extract[Map[String, Any]])
        val configFactory = (rule.configFactory getOrElse DefaultJsonRuleConfigFactory)
          .asInstanceOf[IRuleConfigFactory[JsonNode]]
        configOpt map { config =>
          Try(JsonConfigurableRuleFactory.createRuleWithConfig(ruleClass, configFactory, asJsonNode(config)))
        } getOrElse Try(JsonConfigurableRuleFactory.createRule(ruleClass)) match {
          case Success(instance) =>
            properties.map(m => m.mapValues(_.toString)).map(m => RuleRegistry.saveRuleMetadata(instance, m))
            Some(instance)
          case Failure(e) =>
            logger.warn(e.getMessage)
            None
        }
      }
  } collect {
    case Some(instance) => instance
  }

} 
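
To see how this producer is exercised end to end, here is a hedged driver sketch (not part of RTran): it parses a JSON configuration, as in the test at the start of this section, and materializes the rule instances. An instance is only produced for rules such as MyModifyFileRule that are actually registered in the RuleRegistry.

import com.ebay.rtran.core.JsonUpgradeConfiguration
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

object UpgradeConfigurationSketch extends App {
  implicit val formats = DefaultFormats
  val configuration = parse("""{"ruleConfigs":[{"name":"MyModifyFileRule"}]}""")
    .extract[JsonUpgradeConfiguration]
  // Only rules found in the RuleRegistry yield instances.
  println(configuration.ruleInstances.map(_.getClass.getSimpleName))
}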
Example 145
Source File: ApiHandler.scala    From twitter-stream-ml   with GNU General Public License v3.0 5 votes vote down vote up
package com.giorgioinf.twtml.web

import org.mashupbots.socko.events.{HttpRequestEvent,WebSocketFrameEvent}
import org.mashupbots.socko.infrastructure.Logger
import akka.actor.Actor

import org.json4s.DefaultFormats
import org.json4s.native.Serialization.write

// command events

case class PostHandler(event: HttpRequestEvent)
case class GetConfigHandler(event: HttpRequestEvent)
case class GetStatsHandler(event: HttpRequestEvent)
case class WsFrameHandler(event: WebSocketFrameEvent)
case class WsStartHandler(webSocketId: String)
//case class HomePage(event: HttpRequestEvent)
//case class ShowQueryStringDataPage(event: HttpRequestEvent)
//case class ShowPostDataPage(event: HttpRequestEvent)


class ApiHandler extends Logger with Actor {

  implicit val formats = DefaultFormats

  val ok = write(("status" -> "OK"))

  def response(event:HttpRequestEvent, json:String) = {
    event.response.contentType = "application/json"
    event.response.write(json)
    context.stop(self)
  }

  def receive = {

    case GetConfigHandler(event) => {
      val json = ApiCache.config
      log.debug("http - get config {}", json)
      response(event, json)
    }

    case GetStatsHandler(event) => {
      val json = ApiCache.stats
      log.debug("http - get stats {}", json)
      response(event, json)
    }

    case PostHandler(event) => {
      val json = event.request.content.toString()
      log.debug("http - post data {}", json)
      ApiCache.cache(json)
      response(event, ok)
      log.debug("websocket - send all data {}", json)
      Server.web.webSocketConnections.writeText(json)
    }

    case WsFrameHandler(event) => {
      val json = event.readText
      log.debug("websocket - {} - read data {}", Array(event.webSocketId, json))
      ApiCache.cache(json)

      log.debug("websocket - send all data {}", json)
      Server.web.webSocketConnections.writeText(json)
      context.stop(self)
    }
    case WsStartHandler(webSocketId) => {
      val json = ApiCache.config
      log.debug("websocket - {} - connected, get config {}", Array(webSocketId, json))
      Server.web.webSocketConnections.writeText(json, webSocketId)
      context.stop(self)
    }
  }
} 
Example 146
Source File: SparkStageParam.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.salesforce.op.stages

import com.salesforce.op.stages.sparkwrappers.generic.SparkWrapperParams
import org.apache.hadoop.fs.Path
import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.param.{Param, ParamPair, Params}
import org.apache.spark.ml.util.{Identifiable, MLReader, MLWritable}
import org.apache.spark.util.SparkUtils
import org.json4s.JsonAST.{JObject, JValue}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import org.json4s.{DefaultFormats, Formats, JString}

class SparkStageParam[S <: PipelineStage with Params]
(
  parent: String,
  name: String,
  doc: String,
  isValid: Option[S] => Boolean
) extends Param[Option[S]](parent, name, doc, isValid) {

  import SparkStageParam._

  // Field reconstructed from its usage in jsonDecode below (the excerpt omits it):
  // it records the path the wrapped stage was saved under.
  var savePath: Option[String] = None

  override def jsonDecode(jsonStr: String): Option[S] = {
    val json = parse(jsonStr)
    val uid = (json \ "uid").extractOpt[String]
    val path = (json \ "path").extractOpt[String]

    path -> uid match {
      case (None, _) | (_, None) | (_, Some(NoUID)) =>
        savePath = None
        None
      case (Some(p), Some(stageUid)) =>
        savePath = Option(p)
        val stagePath = new Path(p, stageUid).toString
        val className = (json \ "className").extract[String]
        val cls = SparkUtils.classForName(className)
        val stage = cls.getMethod("read").invoke(null).asInstanceOf[MLReader[PipelineStage]].load(stagePath)
        Option(stage).map(_.asInstanceOf[S])
    }
  }
}

object SparkStageParam {
  implicit val formats: Formats = DefaultFormats
  val NoClass = ""
  val NoUID = ""

  def updateParamsMetadataWithPath(jValue: JValue, path: String): JValue = jValue match {
    case JObject(pairs) => JObject(
      pairs.map {
        case (SparkWrapperParams.SparkStageParamName, j) =>
          SparkWrapperParams.SparkStageParamName -> j.merge(JObject("path" -> JString(path)))
        case param => param
      }
    )
    case j => throw new IllegalArgumentException(s"Cannot recognize JSON Spark params metadata: $j")
  }

} 
Example 147
Source File: TransientFeatureArrayParam.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.salesforce.op.stages

import com.salesforce.op.features._
import org.apache.spark.ml.param._
import org.apache.spark.ml.util.Identifiable
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JArray, JValue}
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.util.{Failure, Success}


// Class declaration reconstructed (the excerpt omits it); the implicit formats
// is assumed, since jsonDecode's extract call requires one in scope.
class TransientFeatureArrayParam
(
  parent: String,
  name: String,
  doc: String,
  isValid: Array[TransientFeature] => Boolean
) extends Param[Array[TransientFeature]](parent, name, doc, isValid) {

  implicit val formats = DefaultFormats

  override def w(value: Array[TransientFeature]): ParamPair[Array[TransientFeature]] = super.w(value)

  override def jsonEncode(value: Array[TransientFeature]): String = {
    compact(render(JArray(value.map(_.toJson).toList)))
  }

  override def jsonDecode(json: String): Array[TransientFeature] = {
    parse(json).extract[Array[JValue]].map(obj => {
      TransientFeature(obj) match {
        case Failure(e) => throw new RuntimeException("Failed to parse TransientFeature", e)
        case Success(v) => v
      }
    })
  }
} 
Example 148
Source File: FeatureJsonHelper.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.salesforce.op.features

import com.salesforce.op.features.types._
import com.salesforce.op.stages.{OPStage, OpPipelineStage}
import org.json4s.JsonAST.{JObject, JValue}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, Formats}

import scala.reflect.runtime.universe.WeakTypeTag
import scala.util.Try


// Object declaration reconstructed; the excerpt omits it.
object FeatureJsonHelper {

  implicit val formats: Formats = DefaultFormats

  def fromJson(
    json: JValue,
    stages: Map[String, OPStage],
    features: Map[String, OPFeature]
  ): Try[OPFeature] = Try {
    val typeName = (json \ "typeName").extract[String]
    val uid = (json \ "uid").extract[String]
    val name = (json \ "name").extract[String]
    val isResponse = (json \ "isResponse").extract[Boolean]
    val originStageUid = (json \ "originStage").extract[String]
    val parentUids = (json \ "parents").extract[Array[String]]

    val originStage: Option[OPStage] = stages.get(originStageUid)
    if (originStage.isEmpty) {
      throw new RuntimeException(s"Origin stage $originStageUid not found for feature $name ($uid)")
    }

    // Order is important and so are duplicates, eg f = f1 + f1 has 2 parents but both the same feature
    val parents: Seq[OPFeature] = parentUids.flatMap(id => features.get(id))
    if (parents.length != parentUids.length) {
      throw new RuntimeException(s"Not all the parent features were found for feature $name ($uid)")
    }

    val wtt = FeatureType.featureTypeTag(typeName).asInstanceOf[WeakTypeTag[FeatureType]]
    Feature[FeatureType](
      uid = uid,
      name = name,
      isResponse = isResponse,
      parents = parents,
      originStage = originStage.get.asInstanceOf[OpPipelineStage[FeatureType]]
    )(wtt = wtt)

  }

} 
Example 149
Source File: SpecialDoubleSerializerTest.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.salesforce.op.utils.json

import com.salesforce.op.test.TestCommon
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, Extraction, Formats}
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner


@RunWith(classOf[JUnitRunner])
class SpecialDoubleSerializerTest extends FlatSpec with TestCommon {

  val data = Map(
    "normal" -> Seq(-1.1, 0.0, 2.3),
    "infs" -> Seq(Double.NegativeInfinity, Double.PositiveInfinity),
    "minMax" -> Seq(Double.MinValue, Double.MaxValue),
    "nan" -> Seq(Double.NaN)
  )

  Spec[SpecialDoubleSerializer] should behave like
    readWriteDoubleValues(data)(
      json = """{"normal":[-1.1,0.0,2.3],"infs":["-Infinity","Infinity"],"minMax":[-1.7976931348623157E308,1.7976931348623157E308],"nan":["NaN"]}""" // scalastyle:off
    )(DefaultFormats + new SpecialDoubleSerializer)

  Spec[SpecialDoubleSerializer] + " (with big decimal)" should behave like
    readWriteDoubleValues(data)(
      json = """{"normal":[-1.1,0.0,2.3],"infs":["-Infinity","Infinity"],"minMax":[-1.7976931348623157E+308,1.7976931348623157E+308],"nan":["NaN"]}""" // scalastyle:off
    )(DefaultFormats.withBigDecimal + new SpecialDoubleSerializer)


  def readWriteDoubleValues(input: Map[String, Seq[Double]])(json: String)(implicit formats: Formats): Unit = {
    it should "write double entries" in {
      compact(Extraction.decompose(input)) shouldBe json
    }
    it should "read double entries" in {
      val parsed = parse(json).extract[Map[String, Seq[Double]]]
      parsed.keys shouldBe input.keys
      parsed zip input foreach {
        case (("nan", a), ("nan", b)) => a.foreach(_.isNaN shouldBe true)
        case ((_, a), (_, b)) => a should contain theSameElementsAs b
      }
    }
  }
} 
Example 150
Source File: EnumEntrySerializerTest.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.salesforce.op.utils.json

import com.salesforce.op.test.TestCommon
import enumeratum.{Enum, EnumEntry}
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, Extraction}
import org.junit.runner.RunWith
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner

import scala.util.Success


@RunWith(classOf[JUnitRunner])
class EnumEntrySerializerTest extends FunSpec with TestCommon {

  implicit val formats = DefaultFormats + EnumEntrySerializer.json4s[TestEnumType](TestEnumType)
  val serdes = Seq(EnumEntrySerializer.jackson[TestEnumType](TestEnumType))

  val data = TestData(a = TestEnumType.One, b = Seq(TestEnumType.Two, TestEnumType.Three))
  val dataJson = """{"a":"One","b":["Two","Three"]}"""

  describe("EnumEntrySerializer") {
    describe("(json4s)") {
      it("write enum entries") {
        compact(Extraction.decompose(data)) shouldBe dataJson
      }
      it("read enum entries") {
        parse(dataJson).extract[TestData] shouldBe data
      }
      it("read enum entries ignoring case") {
        parse(dataJson.toLowerCase).extract[TestData] shouldBe data
      }
    }
    describe("(jackson)") {
      it("write enum entries") {
        JsonUtils.toJsonString(data, pretty = false, serdes = serdes) shouldBe dataJson
      }
      it("read enum entries") {
        JsonUtils.fromString[TestData](dataJson, serdes = serdes) shouldBe Success(data)
      }
      it("read enum entries ignoring case") {
        JsonUtils.fromString[TestData](dataJson.toLowerCase, serdes = serdes) shouldBe Success(data)
      }
    }
  }

}

private[json] case class TestData(a: TestEnumType, b: Seq[TestEnumType])

sealed trait TestEnumType extends EnumEntry with Serializable
object TestEnumType extends Enum[TestEnumType] {
  val values = findValues
  case object One extends TestEnumType
  case object Two extends TestEnumType
  case object Three extends TestEnumType
} 
Example 151
Source File: PackedMessage.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.commons.packer

import java.nio.ByteBuffer

import com.google.protobuf.GeneratedMessageV3
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats}

object PackedMessage {
  implicit val formats: Formats = DefaultFormats + new org.json4s.ext.EnumSerializer(PackerType)
  val MAGIC_BYTES: Array[Byte] = "hytc".getBytes("utf-8")
}

case class PackedMessage[T <: GeneratedMessageV3](protoObj: T,
                                                  private val pack: (T => Array[Byte]),
                                                  private val metadata: PackedMetadata) {
  import PackedMessage._
  private lazy val metadataBytes: Array[Byte] = Serialization.write(metadata).getBytes("utf-8")

  val packedDataBytes: Array[Byte] = {
    val packedDataBytes = pack(protoObj)
    if (PackerType.NONE == metadata.t) {
      packedDataBytes
    } else {
      ByteBuffer
        .allocate(MAGIC_BYTES.length + 4 + metadataBytes.length + packedDataBytes.length)
        .put(MAGIC_BYTES)
        .putInt(metadataBytes.length)
        .put(metadataBytes)
        .put(packedDataBytes).array()
    }
  }
} 
Example 152
Source File: TraceIndexDoc.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.commons.clients.es.document

import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization

import scala.collection.mutable

object TraceIndexDoc {
  implicit val formats = DefaultFormats
  type TagKey = String
  type TagValue = Any

  val SERVICE_KEY_NAME = "servicename"
  val OPERATION_KEY_NAME = "operationname"
  val DURATION_KEY_NAME = "duration"
  val START_TIME_KEY_NAME = "starttime"
}

case class TraceIndexDoc(traceid: String, rootduration: Long, starttime: Long, spans: Seq[mutable.Map[String, Any]]) {
  val json: String = Serialization.write(this)(TraceIndexDoc.formats)
} 
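
A small construction sketch (not from the haystack sources; the tag values are illustrative). The implicit TraceIndexDoc.formats, which is plain DefaultFormats, drives Serialization.write, so each mutable span map is rendered as an ordinary JSON object:

import scala.collection.mutable

val doc = TraceIndexDoc(
  traceid = "abc-123",
  rootduration = 1500L,
  starttime = 1510469157572000L,
  spans = Seq(mutable.Map(
    TraceIndexDoc.SERVICE_KEY_NAME -> "expweb",
    TraceIndexDoc.OPERATION_KEY_NAME -> "search",
    TraceIndexDoc.DURATION_KEY_NAME -> 1500L
  ))
)
// doc.json looks like {"traceid":"abc-123","rootduration":1500,"starttime":...,"spans":[{...}]}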
Example 153
Source File: WhitelistIndexFieldConfigurationSpec.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.commons.unit

import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhiteListIndexFields, WhitelistIndexField, WhitelistIndexFieldConfiguration}
import org.json4s.ext.EnumNameSerializer
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats}
import org.scalatest.{Entry, FunSpec, Matchers}

import scala.collection.JavaConverters._

class WhitelistIndexFieldConfigurationSpec extends FunSpec with Matchers {

  protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)

  describe("whitelist field configuration") {
    it("an empty configuration should return whitelist fields as empty") {
      val config = WhitelistIndexFieldConfiguration()
      config.indexFieldMap shouldBe 'empty
      config.whitelistIndexFields shouldBe 'empty
    }

    it("a loaded configuration should return the non empty whitelist fields") {
      val whitelistField_1 = WhitelistIndexField(name = "role", `type` = IndexFieldType.string, enableRangeQuery = true)
      val whitelistField_2 = WhitelistIndexField(name = "Errorcode", `type` = IndexFieldType.long)

      val config = WhitelistIndexFieldConfiguration()
      val cfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_2)))

      // reload
      config.onReload(cfgJsonData)

      config.whitelistIndexFields.map(_.name) should contain allOf("role", "errorcode")
      config.whitelistIndexFields.filter(r => r.name == "role").head.enableRangeQuery shouldBe true
      config.indexFieldMap.size() shouldBe 2
      config.indexFieldMap.keys().asScala.toList should contain allOf("role", "errorcode")
      config.globalTraceContextIndexFieldNames.size shouldBe 0

      val whitelistField_3 = WhitelistIndexField(name = "status", `type` = IndexFieldType.string, aliases = Set("_status", "HTTP-STATUS"))
      val whitelistField_4 = WhitelistIndexField(name = "something", `type` = IndexFieldType.long, searchContext = "trace")

      val newCfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_3, whitelistField_4)))
      config.onReload(newCfgJsonData)

      config.whitelistIndexFields.size shouldBe 5
      config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
      config.indexFieldMap.size shouldBe 5
      config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")

      config.onReload(newCfgJsonData)
      config.whitelistIndexFields.size shouldBe 5
      config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
      config.indexFieldMap.size() shouldBe 5
      config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")

      config.indexFieldMap.get("http-status").name shouldEqual "status"
      config.indexFieldMap.get("_status").name shouldEqual "status"

      config.globalTraceContextIndexFieldNames.size shouldBe 1
      config.globalTraceContextIndexFieldNames.head shouldEqual "something"
    }
  }
} 
Example 154
Source File: ElasticSearchReadResultListenerSpec.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.reader.unit.stores.readers.es.query

import com.codahale.metrics.{Meter, Timer}
import com.expedia.open.tracing.api.{Field, TracesSearchRequest}
import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhitelistIndexFieldConfiguration}
import com.expedia.www.haystack.trace.reader.config.entities.SpansIndexConfiguration
import com.expedia.www.haystack.trace.reader.exceptions.ElasticSearchClientError
import com.expedia.www.haystack.trace.reader.stores.readers.es.ElasticSearchReadResultListener
import com.expedia.www.haystack.trace.reader.stores.readers.es.query.TraceSearchQueryGenerator
import com.expedia.www.haystack.trace.reader.unit.BaseUnitTestSpec
import io.searchbox.core.SearchResult
import org.easymock.EasyMock
import org.json4s.ext.EnumNameSerializer
import org.json4s.{DefaultFormats, Formats}

import scala.concurrent.Promise

class ElasticSearchReadResultListenerSpec extends BaseUnitTestSpec {
  protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)
  val ES_INDEX_HOUR_BUCKET = 6
  val ES_INDEX_HOUR_TTL = 72

  private val spansIndexConfiguration = SpansIndexConfiguration(
    indexNamePrefix = "haystack-traces",
    indexType = "spans",
    indexHourTtl = ES_INDEX_HOUR_TTL,
    indexHourBucket = ES_INDEX_HOUR_BUCKET,
    useRootDocumentStartTime = false)


  private val searchRequest = {
    val generator = new TraceSearchQueryGenerator(spansIndexConfiguration, "spans", WhitelistIndexFieldConfiguration())
    val field = Field.newBuilder().setName("serviceName").setValue("expweb").build()
    generator.generate(TracesSearchRequest.newBuilder().setStartTime(1510469157572000L).setEndTime(1510469161172000L).setLimit(40).addFields(field).build(), true)
  }

  describe("ElasticSearch Read Result Listener") {
    it("should invoke successful promise with search result") {
      val promise = mock[Promise[SearchResult]]
      val timer = mock[Timer.Context]
      val failureMeter = mock[Meter]
      val searchResult = mock[SearchResult]

      expecting {
        timer.close().once()
        searchResult.getResponseCode.andReturn(200).atLeastOnce()
        promise.success(searchResult).andReturn(promise).once()
      }

      whenExecuting(promise, timer, failureMeter, searchResult) {
        val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
        listener.completed(searchResult)
      }
    }

    it("should invoke failed promise with exception object if response code is not 2xx ") {
      val promise = mock[Promise[SearchResult]]
      val timer = mock[Timer.Context]
      val failureMeter = mock[Meter]
      val searchResult = mock[SearchResult]

      expecting {
        timer.close().once()
        searchResult.getResponseCode.andReturn(500).atLeastOnce()
        searchResult.getJsonString.andReturn("json-string").times(2)
        failureMeter.mark()
        promise.failure(EasyMock.anyObject(classOf[ElasticSearchClientError])).andReturn(promise).once()
      }

      whenExecuting(promise, timer, failureMeter, searchResult) {
        val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
        listener.completed(searchResult)
      }
    }

    it("should invoke failed promise with exception object if failure is generated") {
      val promise = mock[Promise[SearchResult]]
      val timer = mock[Timer.Context]
      val failureMeter = mock[Meter]
      val expectedException = new Exception

      expecting {
        timer.close().once()
        failureMeter.mark()
        promise.failure(expectedException).andReturn(promise).once()
      }

      whenExecuting(promise, timer, failureMeter) {
        val listener = new ElasticSearchReadResultListener(searchRequest, promise, timer, failureMeter)
        listener.failed(expectedException)
      }
    }
  }
}