org.json4s.jackson.Serialization Scala Examples

The following examples show how to use org.json4s.jackson.Serialization. Each example is taken from an open-source project; the source file and the project it comes from are listed in the heading above the code.
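As a starting point, here is a minimal, self-contained round trip with Serialization.write and Serialization.read. This sketch is not taken from any of the projects below; the Person case class is purely illustrative. Note that write requires an implicit org.json4s.Formats in scope, and read additionally requires a Manifest for the target type, which the compiler supplies for concrete types.

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

// Illustrative type only; it does not appear in the projects below.
case class Person(name: String, age: Int)

object SerializationRoundTrip extends App {
  // Formats control date handling, type hints and any custom serializers.
  implicit val formats = Serialization.formats(NoTypeHints)

  val json: String = Serialization.write(Person("Ada", 36))
  // json == {"name":"Ada","age":36}

  val person = Serialization.read[Person](json)
  assert(person == Person("Ada", 36))
}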
Example 1
Source File: TraceIndexDoc.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.commons.clients.es.document

import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization

import scala.collection.mutable

object TraceIndexDoc {
  implicit val formats = DefaultFormats
  type TagKey = String
  type TagValue = Any

  val SERVICE_KEY_NAME = "servicename"
  val OPERATION_KEY_NAME = "operationname"
  val DURATION_KEY_NAME = "duration"
  val START_TIME_KEY_NAME = "starttime"
}

case class TraceIndexDoc(traceid: String, rootduration: Long, starttime: Long, spans: Seq[mutable.Map[String, Any]]) {
  val json: String = Serialization.write(this)(TraceIndexDoc.formats)
} 
Example 2
Source File: EarnDotComFeeProvider.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.blockchain.fee

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JArray, JInt, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


class EarnDotComFeeProvider(readTimeOut: Duration)(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {

  import EarnDotComFeeProvider._

  implicit val formats = DefaultFormats
  implicit val serialization = Serialization

  val uri = uri"https://bitcoinfees.earn.com/api/v1/fees/list"

  override def getFeerates: Future[FeeratesPerKB] =
    for {
      json <- sttp.readTimeout(readTimeOut).get(uri)
        .response(asJson[JValue])
        .send()
      feeRanges = parseFeeRanges(json.unsafeBody)
    } yield extractFeerates(feeRanges)

}

object EarnDotComFeeProvider {

  case class FeeRange(minFee: Long, maxFee: Long, memCount: Long, minDelay: Long, maxDelay: Long)

  def parseFeeRanges(json: JValue): Seq[FeeRange] = {
    val JArray(items) = json \ "fees"
    items.map(item => {
      val JInt(minFee) = item \ "minFee"
      val JInt(maxFee) = item \ "maxFee"
      val JInt(memCount) = item \ "memCount"
      val JInt(minDelay) = item \ "minDelay"
      val JInt(maxDelay) = item \ "maxDelay"
      // earn.com returns fees in Satoshi/byte and we want Satoshi/KiloByte
      FeeRange(minFee = 1000 * minFee.toLong, maxFee = 1000 * maxFee.toLong, memCount = memCount.toLong, minDelay = minDelay.toLong, maxDelay = maxDelay.toLong)
    })
  }

  def extractFeerate(feeRanges: Seq[FeeRange], maxBlockDelay: Int): Long = {
    // first we keep only fee ranges with a max block delay below the limit
    val belowLimit = feeRanges.filter(_.maxDelay <= maxBlockDelay)
    // out of all the remaining fee ranges, we select the one with the minimum higher bound and make sure it is > 0
    Math.max(belowLimit.minBy(_.maxFee).maxFee, 1)
  }

  def extractFeerates(feeRanges: Seq[FeeRange]): FeeratesPerKB =
    FeeratesPerKB(
      block_1 = extractFeerate(feeRanges, 1),
      blocks_2 = extractFeerate(feeRanges, 2),
      blocks_6 = extractFeerate(feeRanges, 6),
      blocks_12 = extractFeerate(feeRanges, 12),
      blocks_36 = extractFeerate(feeRanges, 36),
      blocks_72 = extractFeerate(feeRanges, 72),
      blocks_144 = extractFeerate(feeRanges, 144))

} 
Example 3
Source File: BitgoFeeProvider.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.blockchain.fee

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import fr.acinq.bitcoin.{Block, ByteVector32}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class BitgoFeeProvider(chainHash: ByteVector32, readTimeOut: Duration)(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {

  import BitgoFeeProvider._

  implicit val formats = DefaultFormats
  implicit val serialization = Serialization

  val uri = chainHash match {
    case Block.LivenetGenesisBlock.hash => uri"https://www.bitgo.com/api/v2/btc/tx/fee"
    case _ => uri"https://test.bitgo.com/api/v2/tbtc/tx/fee"
  }

  override def getFeerates: Future[FeeratesPerKB] =
    for {
      res <- sttp.readTimeout(readTimeOut).get(uri)
        .response(asJson[JValue])
        .send()
      feeRanges = parseFeeRanges(res.unsafeBody)
    } yield extractFeerates(feeRanges)

}

object BitgoFeeProvider {

  case class BlockTarget(block: Int, fee: Long)

  def parseFeeRanges(json: JValue): Seq[BlockTarget] = {
    val blockTargets = json \ "feeByBlockTarget"
    blockTargets.foldField(Seq.empty[BlockTarget]) {
      // BitGo returns estimates in Satoshi/KB, which is what we want
      case (list, (strBlockTarget, JInt(feePerKB))) => list :+ BlockTarget(strBlockTarget.toInt, feePerKB.longValue)
    }
  }

  def extractFeerate(feeRanges: Seq[BlockTarget], maxBlockDelay: Int): Long = {
    // first we keep only fee ranges with a max block delay below the limit
    val belowLimit = feeRanges.filter(_.block <= maxBlockDelay)
    // out of all the remaining fee ranges, we select the one with the minimum higher bound
    belowLimit.map(_.fee).min
  }

  def extractFeerates(feeRanges: Seq[BlockTarget]): FeeratesPerKB =
    FeeratesPerKB(
      block_1 = extractFeerate(feeRanges, 1),
      blocks_2 = extractFeerate(feeRanges, 2),
      blocks_6 = extractFeerate(feeRanges, 6),
      blocks_12 = extractFeerate(feeRanges, 12),
      blocks_36 = extractFeerate(feeRanges, 36),
      blocks_72 = extractFeerate(feeRanges, 72),
      blocks_144 = extractFeerate(feeRanges, 144))

} 
Example 4
Source File: BasicBitcoinJsonRPCClient.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.blockchain.bitcoind.rpc

import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.eclair.KamonExt
import fr.acinq.eclair.blockchain.Monitoring.{Metrics, Tags}
import org.json4s.{CustomSerializer, DefaultFormats}
import org.json4s.JsonAST.{JString, JValue}
import org.json4s.jackson.Serialization

import scala.concurrent.{ExecutionContext, Future}

class BasicBitcoinJsonRPCClient(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit http: SttpBackend[Future, Nothing]) extends BitcoinJsonRPCClient {

  // necessary to properly serialize ByteVector32 into String readable by bitcoind
  object ByteVector32Serializer extends CustomSerializer[ByteVector32](_ => ( {
    null
  }, {
    case x: ByteVector32 => JString(x.toHex)
  }))
  implicit val formats = DefaultFormats.withBigDecimal + ByteVector32Serializer
  private val scheme = if (ssl) "https" else "http"
  private val serviceUri = uri"$scheme://$host:$port/wallet/" // wallet/ specifies to use the default bitcoind wallet, named ""
  implicit val serialization = Serialization

  override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] =
    invoke(Seq(JsonRPCRequest(method = method, params = params))).map(l => jsonResponse2Exception(l.head).result)

  def jsonResponse2Exception(jsonRPCResponse: JsonRPCResponse): JsonRPCResponse = jsonRPCResponse match {
    case JsonRPCResponse(_, Some(error), _) => throw JsonRPCError(error)
    case o => o
  }

  def invoke(requests: Seq[JsonRPCRequest])(implicit ec: ExecutionContext): Future[Seq[JsonRPCResponse]] = {
    requests.groupBy(_.method).foreach {
      case (method, calls) => Metrics.RpcBasicInvokeCount.withTag(Tags.Method, method).increment(calls.size)
    }
    KamonExt.timeFuture(Metrics.RpcBasicInvokeDuration.withoutTags()) {
      for {
        res <- sttp
          .post(serviceUri)
          .body(requests)
          .auth.basic(user, password)
          .response(asJson[Seq[JsonRPCResponse]])
          .send()
      } yield res.unsafeBody
    }
  }

} 
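The ByteVector32Serializer in Example 4 is deliberately one-way: its deserialization side is null because the client only ever needs to render ByteVector32 values as strings for bitcoind. For comparison, a two-way CustomSerializer follows the sketch below; UserId and Account are hypothetical types used only for illustration.

import org.json4s.{CustomSerializer, DefaultFormats}
import org.json4s.JsonAST.JString
import org.json4s.jackson.Serialization

// Hypothetical wrapper type, not part of the eclair code above.
case class UserId(value: String)
case class Account(id: UserId, balance: Long)

// Supplies both directions: JSON string -> UserId and UserId -> JSON string.
object UserIdSerializer extends CustomSerializer[UserId](_ => (
  { case JString(s) => UserId(s) },
  { case UserId(s) => JString(s) }
))

object CustomSerializerExample extends App {
  implicit val formats = DefaultFormats + UserIdSerializer

  val json = Serialization.write(Account(UserId("u-42"), 100L))
  // json == {"id":"u-42","balance":100}
  assert(Serialization.read[Account](json) == Account(UserId("u-42"), 100L))
}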
Example 5
Source File: ErrorEventsWriter.scala    From etl-light   with MIT License
package yamrcraft.etlite.writers

import java.io.OutputStream

import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, IntWritable, SequenceFile, Text}
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats, ShortTypeHints}
import yamrcraft.etlite.utils.FileUtils


case class ErrorInfo(
  errorType: String,
  errorMsg: Option[String]
)


class ErrorEventWriter(folder: String, jobId: Long, partitionId: Int)
  extends ErrorEventsWriter {

  // incremental record id
  var recordId = 1

  val fs = FileUtils.getFS(folder)

  val seqPath = new Path(folder, s"errors_job${jobId}_part$partitionId.seq")
  if (fs.exists(seqPath)) {
    fs.delete(seqPath, false)
  }

  val metaPath = new Path(folder, s"errors_job${jobId}_part$partitionId.meta.seq")
  if (fs.exists(metaPath)) {
    fs.delete(metaPath, false)
  }

  private var seqWriter: Option[SequenceFile.Writer] = None
  private var metaWriter: Option[SequenceFile.Writer] = None

  implicit val formats =
    new Formats {
      val dateFormat = DefaultFormats.lossless.dateFormat
      override val typeHints = ShortTypeHints(List(classOf[ErrorInfo]))
      override val typeHintFieldName = "type"
    }

  override def write(errorEvent: (Array[Byte], ErrorInfo)) = {

    if (seqWriter.isEmpty) {
      seqWriter = createSequenceFile(seqPath, classOf[IntWritable], classOf[BytesWritable])
      metaWriter = createSequenceFile(metaPath, classOf[IntWritable], classOf[Text])
    }

    val id = new IntWritable(recordId)
    seqWriter.get.append(id, new BytesWritable(errorEvent._1))
    metaWriter.get.append(id, new Text(Serialization.write(errorEvent._2)))

    recordId += 1
  }

  override def commit() = {
    seqWriter.foreach(p => p.close())
    metaWriter.foreach(p => p.close())
  }

  private def createSequenceFile(path: Path, keyClass: Class[_], valueClass: Class[_]) = {
    val optPath = SequenceFile.Writer.file(path)
    val optKey = SequenceFile.Writer.keyClass(keyClass)
    val optVal = SequenceFile.Writer.valueClass(valueClass)
    Some(SequenceFile.createWriter(fs.getConf, optPath, optKey, optVal))
  }
} 
Example 6
Source File: PTypeSuite.scala    From hail   with MIT License
package is.hail.expr.ir

import is.hail.HailSuite
import is.hail.types.physical._
import is.hail.types.virtual
import is.hail.types.virtual.{TArray, TDict, TInt32, TInterval, TStruct}
import is.hail.rvd.AbstractRVDSpec
import is.hail.utils._
import is.hail.variant.ReferenceGenome
import org.apache.spark.sql.Row
import org.json4s.jackson.Serialization
import org.testng.annotations.{DataProvider, Test}

class PTypeSuite extends HailSuite {

  @DataProvider(name="ptypes")
  def ptypes(): Array[Array[Any]] = {
    Array[PType](
      PInt32(true),
      PInt32(false),
      PInt64(true),
      PInt64(false),
      PFloat32(true),
      PFloat64(true),
      PBoolean(true),
      PCanonicalCall(true),
      PCanonicalBinary(false),
      PCanonicalString(true),
      PCanonicalLocus(ReferenceGenome.GRCh37, false),
      PCanonicalArray(PInt32Required, true),
      PCanonicalSet(PInt32Required, false),
      PCanonicalDict(PInt32Required, PCanonicalString(true), true),
      PCanonicalInterval(PInt32Optional, false),
      PCanonicalTuple(FastIndexedSeq(PTupleField(1, PInt32Required), PTupleField(3, PCanonicalString(false))), true),
      PCanonicalStruct(FastIndexedSeq(PField("foo", PInt32Required, 0), PField("bar", PCanonicalString(false), 1)), true)
    ).map(t => Array(t: Any))
  }

  @Test def testPTypesDataProvider(): Unit = {
    ptypes()
  }

  @Test(dataProvider="ptypes")
  def testSerialization(ptype: PType): Unit = {
    implicit val formats = AbstractRVDSpec.formats
    val s = Serialization.write(ptype)
    assert(Serialization.read[PType](s) == ptype)
  }

  @Test def testLiteralPType(): Unit = {
    assert(PType.literalPType(TInt32, 5) == PInt32(true))
    assert(PType.literalPType(TInt32, null) == PInt32())

    assert(PType.literalPType(TArray(TInt32), null) == PCanonicalArray(PInt32(true)))
    assert(PType.literalPType(TArray(TInt32), FastIndexedSeq(1, null)) == PCanonicalArray(PInt32(), true))
    assert(PType.literalPType(TArray(TInt32), FastIndexedSeq(1, 5)) == PCanonicalArray(PInt32(true), true))

    assert(PType.literalPType(TInterval(TInt32), Interval(5, null, false, true)) == PCanonicalInterval(PInt32(), true))

    val p = TStruct("a" -> TInt32, "b" -> TInt32)
    val d = TDict(p, p)
    assert(PType.literalPType(d, Map(Row(3, null) -> Row(null, 3))) ==
      PCanonicalDict(
        PCanonicalStruct(true, "a" -> PInt32(true), "b" -> PInt32()),
        PCanonicalStruct(true, "a" -> PInt32(), "b" -> PInt32(true)),
        true))
  }
} 
Example 7
Source File: JsonUtils.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.sql.kafka010

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


object JsonUtils {
  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    implicit val ordering = new Ordering[TopicPartition] {
      override def compare(x: TopicPartition, y: TopicPartition): Int = {
        Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition))
      }
    }
    val partitions = partitionOffsets.keySet.toSeq.sorted  // sort for more determinism
    partitions.foreach { tp =>
        val off = partitionOffsets(tp)
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
Example 8
Source File: FileStreamSourceOffset.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import scala.util.control.Exception._

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


case class FileStreamSourceOffset(logOffset: Long) extends Offset {
  override def json: String = {
    Serialization.write(this)(FileStreamSourceOffset.format)
  }
}

object FileStreamSourceOffset {
  implicit val format = Serialization.formats(NoTypeHints)

  def apply(offset: Offset): FileStreamSourceOffset = {
    offset match {
      case f: FileStreamSourceOffset => f
      case SerializedOffset(str) =>
        catching(classOf[NumberFormatException]).opt {
          FileStreamSourceOffset(str.toLong)
        }.getOrElse {
          Serialization.read[FileStreamSourceOffset](str)
        }
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid conversion from offset of ${offset.getClass} to FileStreamSourceOffset")
    }
  }
} 
Example 9
Source File: FileStreamSinkLog.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.{read, write}

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf


class FileStreamSinkLog(
    metadataLogVersion: String,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val defaultCompactInterval =
    sparkSession.sessionState.conf.fileSinkLogCompactInterval

  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $defaultCompactInterval) " +
      "to a positive value.")

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = "v1"
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
} 
Example 10
Source File: StreamMetadata.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, FSDataOutputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.streaming.StreamingQuery


object StreamMetadata extends Logging {
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: FSDataOutputStream = null
    try {
      val fs = FileSystem.get(hadoopConf)
      output = fs.create(metadataFile)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case NonFatal(e) =>
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    } finally {
      IOUtils.closeQuietly(output)
    }
  }
} 
Example 11
Source File: PackedMessage.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.commons.packer

import java.nio.ByteBuffer

import com.google.protobuf.GeneratedMessageV3
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats}

object PackedMessage {
  implicit val formats: Formats = DefaultFormats + new org.json4s.ext.EnumSerializer(PackerType)
  val MAGIC_BYTES: Array[Byte] = "hytc".getBytes("utf-8")
}

case class PackedMessage[T <: GeneratedMessageV3](protoObj: T,
                                                  private val pack: (T => Array[Byte]),
                                                  private val metadata: PackedMetadata) {
  import PackedMessage._
  private lazy val metadataBytes: Array[Byte] = Serialization.write(metadata).getBytes("utf-8")

  val packedDataBytes: Array[Byte] = {
    val packedDataBytes = pack(protoObj)
    if (PackerType.NONE == metadata.t) {
      packedDataBytes
    } else {
      ByteBuffer
        .allocate(MAGIC_BYTES.length + 4 + metadataBytes.length + packedDataBytes.length)
        .put(MAGIC_BYTES)
        .putInt(metadataBytes.length)
        .put(metadataBytes)
        .put(packedDataBytes).array()
    }
  }
} 
Example 12
Source File: Unpacker.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.commons.packer

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream}
import java.nio.ByteBuffer
import java.util.zip.GZIPInputStream

import com.expedia.open.tracing.buffer.SpanBuffer
import com.github.luben.zstd.ZstdInputStream
import org.apache.commons.io.IOUtils
import org.json4s.jackson.Serialization
import org.xerial.snappy.SnappyInputStream

object Unpacker {
  import PackedMessage._

  private def readMetadata(packedDataBytes: Array[Byte]): Array[Byte] = {
    val byteBuffer = ByteBuffer.wrap(packedDataBytes)
    val magicBytesExist = MAGIC_BYTES.indices forall { idx => byteBuffer.get() == MAGIC_BYTES.apply(idx) }
    if (magicBytesExist) {
      val headerLength = byteBuffer.getInt
      val metadataBytes = new Array[Byte](headerLength)
      byteBuffer.get(metadataBytes, 0, headerLength)
      metadataBytes
    } else {
      null
    }
  }

  private def unpack(compressedStream: InputStream) = {
    val outputStream = new ByteArrayOutputStream()
    IOUtils.copy(compressedStream, outputStream)
    outputStream.toByteArray
  }

  def readSpanBuffer(packedDataBytes: Array[Byte]): SpanBuffer = {
    var parsedDataBytes: Array[Byte] = null
    val metadataBytes = readMetadata(packedDataBytes)
    if (metadataBytes != null) {
      val packedMetadata = Serialization.read[PackedMetadata](new String(metadataBytes))
      val compressedDataOffset = MAGIC_BYTES.length + 4 + metadataBytes.length
      packedMetadata.t match {
        case PackerType.SNAPPY =>
          parsedDataBytes = unpack(
            new SnappyInputStream(
              new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
        case PackerType.GZIP =>
          parsedDataBytes = unpack(
            new GZIPInputStream(
              new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
        case PackerType.ZSTD =>
          parsedDataBytes = unpack(
            new ZstdInputStream(
              new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset)))
        case _ =>
          return SpanBuffer.parseFrom(
            new ByteArrayInputStream(packedDataBytes, compressedDataOffset, packedDataBytes.length - compressedDataOffset))
      }
    } else {
      parsedDataBytes = packedDataBytes
    }
    SpanBuffer.parseFrom(parsedDataBytes)
  }
} 
Example 13
Source File: ImageProcessor.scala    From donut   with MIT License
package report.donut.gherkin.processors

import report.donut.gherkin.model.Embedding
import org.json4s.jackson.Serialization
import org.json4s.{Formats, NoTypeHints, jackson}

object ImageProcessor {

  var imageMap = scala.collection.mutable.Map[Int, Embedding]()

  def getScreenshotIds(embeddings: List[Embedding]): String = {
    val a: Map[Int, Embedding] = embeddings.map(e => (e.data.hashCode -> e)).toMap
    a.map(a => imageMap += a)
    a.map(a => a._1).toList.mkString(",")
  }

  def allImages: String = {
    implicit def json4sJacksonFormats: Formats = jackson.Serialization.formats(NoTypeHints)
    val b: List[Embedding] = imageMap.map { case (k, v) => new Embedding(v.mime_type, v.data, k) }.toList
    Serialization.writePretty(b)
  }

} 
Example 14
Source File: WhitelistIndexFieldConfigurationSpec.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.commons.unit

import com.expedia.www.haystack.trace.commons.config.entities.{IndexFieldType, WhiteListIndexFields, WhitelistIndexField, WhitelistIndexFieldConfiguration}
import org.json4s.ext.EnumNameSerializer
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats}
import org.scalatest.{Entry, FunSpec, Matchers}

import scala.collection.JavaConverters._

class WhitelistIndexFieldConfigurationSpec extends FunSpec with Matchers {

  protected implicit val formats: Formats = DefaultFormats + new EnumNameSerializer(IndexFieldType)

  describe("whitelist field configuration") {
    it("an empty configuration should return whitelist fields as empty") {
      val config = WhitelistIndexFieldConfiguration()
      config.indexFieldMap shouldBe 'empty
      config.whitelistIndexFields shouldBe 'empty
    }

    it("a loaded configuration should return the non empty whitelist fields") {
      val whitelistField_1 = WhitelistIndexField(name = "role", `type` = IndexFieldType.string, enableRangeQuery = true)
      val whitelistField_2 = WhitelistIndexField(name = "Errorcode", `type` = IndexFieldType.long)

      val config = WhitelistIndexFieldConfiguration()
      val cfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_2)))

      // reload
      config.onReload(cfgJsonData)

      config.whitelistIndexFields.map(_.name) should contain allOf("role", "errorcode")
      config.whitelistIndexFields.filter(r => r.name == "role").head.enableRangeQuery shouldBe true
      config.indexFieldMap.size() shouldBe 2
      config.indexFieldMap.keys().asScala.toList should contain allOf("role", "errorcode")
      config.globalTraceContextIndexFieldNames.size shouldBe 0

      val whitelistField_3 = WhitelistIndexField(name = "status", `type` = IndexFieldType.string, aliases = Set("_status", "HTTP-STATUS"))
      val whitelistField_4 = WhitelistIndexField(name = "something", `type` = IndexFieldType.long, searchContext = "trace")

      val newCfgJsonData = Serialization.write(WhiteListIndexFields(List(whitelistField_1, whitelistField_3, whitelistField_4)))
      config.onReload(newCfgJsonData)

      config.whitelistIndexFields.size shouldBe 5
      config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
      config.indexFieldMap.size shouldBe 5
      config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")

      config.onReload(newCfgJsonData)
      config.whitelistIndexFields.size shouldBe 5
      config.whitelistIndexFields.map(_.name).toSet should contain allOf("status", "something", "role")
      config.indexFieldMap.size() shouldBe 5
      config.indexFieldMap.keys().asScala.toList should contain allOf("status", "something", "role", "http-status", "_status")

      config.indexFieldMap.get("http-status").name shouldEqual "status"
      config.indexFieldMap.get("_status").name shouldEqual "status"

      config.globalTraceContextIndexFieldNames.size shouldBe 1
      config.globalTraceContextIndexFieldNames.head shouldEqual "something"
    }
  }
} 
Example 15
Source File: LambdaHTTPApiAnnotation.scala    From quaich   with Apache License 2.0
package codes.bytes.quaich.api.http.macros

import scala.annotation.{StaticAnnotation, compileTimeOnly}
import scala.language.postfixOps
import scala.reflect.macros.blackbox
import scala.language.experimental.macros

object LambdaHTTPApi {
  // todo - check for companion object and reject
  def annotation_impl(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
    import c.universe._
    import Flag._

    val p = c.enclosingPosition

    val inputs = annottees.map(_.tree).toList



    val result: Tree = inputs match {
      case (cls @ q"$mods class $name[..$tparams] extends ..$parents { ..$body }") :: Nil if mods.hasFlag(ABSTRACT) ⇒
        c.abort(p, "! The @LambdaHTTPApi annotation is not valid on abstract classes.")
        cls
      // todo - detect and handle companion object!
      case (cls @ q"$mods class $name[..$tparams] extends ..$parents { ..$body }") :: Nil ⇒
        //val baseName = name.decodedName.toString
        //val handlerName = TermName(s"$baseName$$RequestHandler")
        //val handlerName = name.toTermName
        val handlerName = name.asInstanceOf[TypeName].toTermName

        val cls = q"""
        $mods class $name[..$tparams](
            val request: codes.bytes.quaich.api.http.LambdaHTTPRequest,
            val context: codes.bytes.quaich.api.http.LambdaContext
          )
          extends ..$parents
          with codes.bytes.quaich.api.http.HTTPHandler {
            import org.json4s.jackson.JsonMethods._
            import org.json4s.jackson.Serialization
            import org.json4s.jackson.Serialization._
            import org.json4s.{NoTypeHints, _}

            protected implicit val formats = Serialization.formats(NoTypeHints)

            ..$body
          }
        """

        val obj = q"""
        object $handlerName extends codes.bytes.quaich.api.http.HTTPApp {
          def newHandler(
            request: codes.bytes.quaich.api.http.LambdaHTTPRequest,
            context: codes.bytes.quaich.api.http.LambdaContext
          ): codes.bytes.quaich.api.http.HTTPHandler =
            new $name(request, context)


        }
        """

        q"$cls; $obj"

      case Nil ⇒
        c.abort(p, s"Cannot annotate an empty Tree.")
      case _ ⇒
        c.abort(p, s"! The @LambdaHTTPApi Annotation is only valid on non-abstract Classes")
    }

    //c.info(p, "result: " + result, force = true)

    c.Expr[Any](result)

  }

}

@compileTimeOnly("Setup the macro paradise compiler plugin to enable expansion of macro annotations.")
class LambdaHTTPApi extends StaticAnnotation {

  def macroTransform(annottees: Any*): Any = macro LambdaHTTPApi.annotation_impl

}
// vim: set ts=2 sw=2 sts=2 et: 
Example 16
Source File: JsonUtils.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.kafka010

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


object JsonUtils {
  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    implicit val ordering = new Ordering[TopicPartition] {
      override def compare(x: TopicPartition, y: TopicPartition): Int = {
        Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition))
      }
    }
    val partitions = partitionOffsets.keySet.toSeq.sorted  // sort for more determinism
    partitions.foreach { tp =>
        val off = partitionOffsets(tp)
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
Example 17
Source File: FileStreamSourceOffset.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import scala.util.control.Exception._

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


case class FileStreamSourceOffset(logOffset: Long) extends Offset {
  override def json: String = {
    Serialization.write(this)(FileStreamSourceOffset.format)
  }
}

object FileStreamSourceOffset {
  implicit val format = Serialization.formats(NoTypeHints)

  def apply(offset: Offset): FileStreamSourceOffset = {
    offset match {
      case f: FileStreamSourceOffset => f
      case SerializedOffset(str) =>
        catching(classOf[NumberFormatException]).opt {
          FileStreamSourceOffset(str.toLong)
        }.getOrElse {
          Serialization.read[FileStreamSourceOffset](str)
        }
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid conversion from offset of ${offset.getClass} to FileStreamSourceOffset")
    }
  }
} 
Example 18
Source File: FileStreamSinkLog.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.net.URI

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf


class FileStreamSinkLog(
    metadataLogVersion: Int,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val defaultCompactInterval =
    sparkSession.sessionState.conf.fileSinkLogCompactInterval

  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $defaultCompactInterval) " +
      "to a positive value.")

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = 1
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
} 
Example 19
Source File: StreamMetadata.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, FSDataOutputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.streaming.StreamingQuery


object StreamMetadata extends Logging {
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: FSDataOutputStream = null
    try {
      val fs = metadataFile.getFileSystem(hadoopConf)
      output = fs.create(metadataFile)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case NonFatal(e) =>
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    } finally {
      IOUtils.closeQuietly(output)
    }
  }
} 
Example 20
Source File: KinesisPosition.scala    From kinesis-sql   with Apache License 2.0
package org.apache.spark.sql.kinesis

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

trait KinesisPosition extends Serializable {
  val iteratorType: String
  val iteratorPosition: String

  override def toString: String = s"KinesisPosition($iteratorType, $iteratorPosition)"
}

class TrimHorizon() extends KinesisPosition {
  override val iteratorType = "TRIM_HORIZON"
  override val iteratorPosition = ""
}

class Latest() extends KinesisPosition {
  override val iteratorType = "LATEST"
  override val iteratorPosition = ""
}

class AtTimeStamp(timestamp: String) extends KinesisPosition {
  def this(timestamp: Long) {
    this(timestamp.toString)
  }
  override val iteratorType = "AT_TIMESTAMP"
  override val iteratorPosition = timestamp.toString
}

class AfterSequenceNumber(seqNumber: String) extends KinesisPosition {
  override val iteratorType = "AFTER_SEQUENCE_NUMBER"
  override val iteratorPosition = seqNumber
}

class AtSequenceNumber(seqNumber: String) extends KinesisPosition {
  override val iteratorType = "AT_SEQUENCE_NUMBER"
  override val iteratorPosition = seqNumber
}

class ShardEnd() extends KinesisPosition {
  override val iteratorType = "SHARD_END"
  override val iteratorPosition = ""
}

private[kinesis] object KinesisPosition {
  def make(iteratorType: String, iteratorPosition: String): KinesisPosition = iteratorType match {
    case iterType if "TRIM_HORIZON".equalsIgnoreCase(iterType) => new TrimHorizon()
    case iterType if "LATEST".equalsIgnoreCase(iterType) => new Latest()
    case iterType if "AT_TIMESTAMP".equalsIgnoreCase(iterType) => new AtTimeStamp(iteratorPosition)
    case iterType if "AT_SEQUENCE_NUMBER".equalsIgnoreCase(iterType) =>
      new AtSequenceNumber(iteratorPosition)
    case iterType if "AFTER_SEQUENCE_NUMBER".equalsIgnoreCase(iterType) =>
      new AfterSequenceNumber(iteratorPosition)
    case iterType if "SHARD_END".equalsIgnoreCase(iterType) => new ShardEnd()
  }
}


object InitialKinesisPosition {

  def fromCheckpointJson(text: String, defaultPosition: KinesisPosition): InitialKinesisPosition = {
    val kso = KinesisSourceOffset(text)
    val shardOffsets = kso.shardsToOffsets

    new InitialKinesisPosition(
      shardOffsets.shardInfoMap
        .map(si => si._1 -> KinesisPosition.make(si._2.iteratorType, si._2.iteratorPosition)),
      defaultPosition
      )
  }
} 
Example 21
Source File: KinesisSourceOffset.scala    From kinesis-sql   with Apache License 2.0
package org.apache.spark.sql.kinesis

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.spark.sql.execution.streaming.Offset
import org.apache.spark.sql.execution.streaming.SerializedOffset
import org.apache.spark.sql.sources.v2.reader.streaming.{Offset => OffsetV2, PartitionOffset}


 
object KinesisSourceOffset {

  implicit val formats = Serialization.formats(NoTypeHints)

  def apply(json: String): KinesisSourceOffset = {
    try {
      val readObj = Serialization.read[ Map[ String, Map[ String, String ] ] ](json)
      val metadata = readObj.get("metadata")
      val shardInfoMap: Map[String, ShardInfo ] = readObj.filter(_._1 != "metadata").map {
        case (shardId, value) => shardId.toString -> new ShardInfo(shardId.toString,
          value.get("iteratorType").get,
          value.get("iteratorPosition").get)
      }.toMap
      KinesisSourceOffset(
        new ShardOffsets(
          metadata.get("batchId").toLong,
          metadata.get("streamName"),
          shardInfoMap))
    } catch {
      case NonFatal(x) => throw new IllegalArgumentException(x)
    }
  }

  def getMap(shardInfos: Array[ShardInfo]): Map[String, ShardInfo] = {
    shardInfos.map {
      s: ShardInfo => (s.shardId -> s)
    }.toMap
  }

} 
Example 22
Source File: JsonUtils.scala    From pulsar-spark   with Apache License 2.0
package org.apache.spark.sql.pulsar

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.pulsar.client.api.MessageId


object JsonUtils {

  private implicit val formats = Serialization.formats(NoTypeHints)

  def topics(str: String): Array[String] = {
    Serialization.read[Array[String]](str)
  }

  def topics(topics: Array[String]): String = {
    Serialization.write(topics)
  }

  def topicOffsets(str: String): Map[String, MessageId] = {
    Serialization.read[Map[String, Array[Byte]]](str).map {
      case (topic, msgIdBytes) =>
        (topic, MessageId.fromByteArray(msgIdBytes))
    }
  }

  def topicOffsets(topicOffsets: Map[String, MessageId]): String = {
    Serialization.write(topicOffsets.map {
      case (topic, msgId) =>
        (topic, msgId.toByteArray)
    })
  }

  def topicTimes(topicTimes: Map[String, Long]): String = {
    Serialization.write(topicTimes)
  }

  def topicTimes(str: String): Map[String, Long] = {
    Serialization.read[Map[String, Long]](str)
  }
} 
Example 23
Source File: RoutingHandler.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.http.routing

import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server._
import akka.http.scaladsl.settings.RoutingSettings
import com.github.vonnagy.service.container.http.routing.Rejection.{DuplicateRejection, NotFoundRejection}
import com.github.vonnagy.service.container.http.{DefaultMarshallers, RejectionResponse}
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config
import org.json4s.jackson.Serialization

import scala.collection.immutable
import scala.util.control.NonFatal

trait RoutingHandler extends Directives with DefaultMarshallers with LoggingAdapter {

  def conf: Config
  implicit val routeSettings = RoutingSettings(conf)
  implicit val marshaller: ToEntityMarshaller[AnyRef] = jsonMarshaller

  
  implicit val rejectionHandler = new RejectionHandler {
    val orig = RejectionHandler.newBuilder()
      .handle { case NotFoundRejection(errorMsg) => complete(NotFound, errorMsg) }
      .handle { case DuplicateRejection(errorMsg) => complete(BadRequest, errorMsg) }
      .handle { case MalformedRequestContentRejection(errorMsg, _) => complete(UnprocessableEntity, errorMsg) }
      .handleNotFound { complete((NotFound, "The requested resource could not be found.")) }
      .result
      .seal

    def apply(v1: immutable.Seq[Rejection]): Option[Route] = {
      val originalResult = orig(v1).getOrElse(complete(StatusCodes.InternalServerError))

      Some(mapResponse(transformExceptionRejection) {
        originalResult
      })
    }
  }

  private def transformExceptionRejection(response: HttpResponse): HttpResponse = {
    response.entity match {
      // If the entity isn't Strict (and it definitely will be), don't bother
      // converting, just throw an error, because something's weird.
      case strictEntity: HttpEntity.Strict =>
        val rej = RejectionResponse(response.status.intValue, response.status.defaultMessage,
          strictEntity.data.utf8String)

        response.withEntity(HttpEntity(ContentType(MediaTypes.`application/json`),
          Serialization.write(rej)))

      case _ =>
        throw new Exception("Unexpected entity type")
    }
  }

} 
Example 24
Source File: StreamMetadata.scala    From XSQL   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import java.util.ConcurrentModificationException

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileAlreadyExistsException, FSDataInputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.streaming.CheckpointFileManager.CancellableFSDataOutputStream
import org.apache.spark.sql.streaming.StreamingQuery


object StreamMetadata extends Logging {
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: CancellableFSDataOutputStream = null
    try {
      val fileManager = CheckpointFileManager.create(metadataFile.getParent, hadoopConf)
      output = fileManager.createAtomic(metadataFile, overwriteIfPossible = false)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case e: FileAlreadyExistsException =>
        if (output != null) {
          output.cancel()
        }
        throw new ConcurrentModificationException(
          s"Multiple streaming queries are concurrently using $metadataFile", e)
      case e: Throwable =>
        if (output != null) {
          output.cancel()
        }
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    }
  }
} 
Example 25
Source File: FileStreamSinkLog.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.execution.streaming

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.{read, write}

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf


class FileStreamSinkLog(
    metadataLogVersion: String,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val compactInterval = sparkSession.sessionState.conf.fileSinkLogCompactInterval
  require(compactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $compactInterval) " +
      "to a positive value.")

  protected override def serializeData(data: SinkFileStatus): String = {
    write(data)
  }

  protected override def deserializeData(encodedString: String): SinkFileStatus = {
    read[SinkFileStatus](encodedString)
  }

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = "v1"
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
} 
Example 26
Source File: RPCConsumer.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.transform

import com.webank.wedatasphere.linkis.common.exception.ExceptionManager
import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.rpc.exception.DWCURIException
import com.webank.wedatasphere.linkis.server.{BDPJettyServerHelper, JMap, Message, EXCEPTION_MSG}
import org.json4s.jackson.Serialization

import scala.reflect.ManifestFactory
import scala.runtime.BoxedUnit


private[linkis] trait RPCConsumer {

  def toObject(message: Message): Any

}
private[linkis] object RPCConsumer {
  import RPCProduct._
  private val rpcConsumer: RPCConsumer = new RPCConsumer {
    override def toObject(message: Message): Any = {
      message.getStatus match {
        case 0 =>
          val data = message.getData
          if(data.isEmpty) return BoxedUnit.UNIT
          val objectStr = data.get(OBJECT_VALUE).toString
          val objectClass = data.get(CLASS_VALUE).toString
          val clazz = Utils.tryThrow(Class.forName(objectClass)){
            case _: ClassNotFoundException =>
              new DWCURIException(10003, s"The corresponding anti-sequence class $objectClass was not found.(找不到对应的反序列类$objectClass.)")
            case t: ExceptionInInitializerError =>
              val exception = new DWCURIException(10004, s"The corresponding anti-sequence class ${objectClass} failed to initialize.(对应的反序列类${objectClass}初始化失败.)")
              exception.initCause(t)
              exception
            case t: Throwable => t
          }
          if(data.get(IS_SCALA_CLASS).toString.toBoolean) {
            val realClass = getSerializableScalaClass(clazz)
            Serialization.read(objectStr)(formats, ManifestFactory.classType(realClass))
          } else {
            BDPJettyServerHelper.gson.fromJson(objectStr, clazz)
          }
        case 4 =>
          val errorMsg = message.getData.get(EXCEPTION_MSG).asInstanceOf[JMap[String, Object]]
          ExceptionManager.generateException(errorMsg)
        case _ =>
          val errorMsg = message.getData.get(EXCEPTION_MSG)
          if(errorMsg == null) throw new DWCURIException(10005, message.getMessage)
          val realError = ExceptionManager.generateException(errorMsg.asInstanceOf[JMap[String, Object]])
          throw realError;
      }
    }
  }
  def getRPCConsumer: RPCConsumer = rpcConsumer
} 
Example 27
Source File: RPCProduct.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.transform

import java.lang.reflect.{ParameterizedType, Type}
import java.util

import com.webank.wedatasphere.linkis.DataWorkCloudApplication
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.rpc.exception.DWCURIException
import com.webank.wedatasphere.linkis.server.{BDPJettyServerHelper, EXCEPTION_MSG, Message}
import org.apache.commons.lang.ClassUtils
import org.json4s.jackson.Serialization
import org.json4s.{DefaultFormats, Formats, Serializer}

import scala.collection.JavaConversions


private[linkis] trait RPCProduct {

  def toMessage(t: Any): Message

  def notFound(): Message

  def ok(): Message

}
private[linkis] object RPCProduct extends Logging {
  private[rpc] val IS_SCALA_CLASS = "rpc_is_scala_class"
  private[rpc] val CLASS_VALUE = "rpc_object_class"
  private[rpc] val OBJECT_VALUE = "rpc_object_value"
  private[rpc] implicit var formats: Formats = DefaultFormats + JavaCollectionSerializer + JavaMapSerializer
  private var serializerClasses: List[Class[_]] = List.empty
  private val rpcProduct: RPCProduct = new RPCProduct {
    private val rpcFormats = DataWorkCloudApplication.getApplicationContext.getBeansOfType(classOf[RPCFormats])
    if(rpcFormats != null && !rpcFormats.isEmpty) {
      val serializers = JavaConversions.mapAsScalaMap(rpcFormats).map(_._2.getSerializers).toArray.flatMap(_.iterator)
      setFormats(serializers)
    }
    override def toMessage(t: Any): Message = {
      if(t == null) throw new DWCURIException(10001, "The transmitted bean is Null.(传输的bean为Null.)")
      val message = Message.ok("RPC Message.")
      if(isScalaClass(t)){
        message.data(IS_SCALA_CLASS, "true")
        message.data(OBJECT_VALUE, Serialization.write(t.asInstanceOf[AnyRef]))
      } else {
        message.data(IS_SCALA_CLASS, "false")
        message.data(OBJECT_VALUE, BDPJettyServerHelper.gson.toJson(t))
      }
      message.setMethod("/rpc/message")
      message.data(CLASS_VALUE, t.getClass.getName)
    }

    override def notFound(): Message = {
      val message = Message.error("RPC Message.")
      message.setMethod("/rpc/message")
      message.data(EXCEPTION_MSG, new DWCURIException(10000, "The service does not exist for the available Receiver.(服务不存在可用的Receiver.)").toMap)
    }

    override def ok(): Message = {
      val message = Message.ok("RPC Message.")
      message.setMethod("/rpc/message")
      message
    }
  }
  private[rpc] def setFormats(serializer: Array[Serializer[_]]): Unit ={
    this.formats = (serializer :+ JavaCollectionSerializer :+ JavaMapSerializer).foldLeft(DefaultFormats.asInstanceOf[Formats])(_ + _)
    serializerClasses = formats.customSerializers.map(s => getActualTypeClass(s.getClass.getGenericSuperclass))
      .filter(_ != null) ++: List(classOf[util.List[_]], classOf[util.Map[_, _]])
    info("RPC Serializers: " + this.formats.customSerializers.map(_.getClass.getSimpleName) + ", serializerClasses: " +
      "" + serializerClasses)
  }
  private def getActualTypeClass(classType: Type): Class[_] = classType match {
    case p: ParameterizedType =>
      val params = p.getActualTypeArguments
      if(params == null || params.isEmpty) null
      else getActualTypeClass(params(0))
    case c: Class[_] => c
    case _ => null
  }
  private[rpc] def isScalaClass(obj: Any): Boolean =
    (obj.isInstanceOf[Product] && obj.isInstanceOf[Serializable]) ||
      serializerClasses.exists(ClassUtils.isAssignable(obj.getClass, _)) ||
        obj.getClass.getName.startsWith("scala.")
  private[rpc] def getSerializableScalaClass(clazz: Class[_]): Class[_] =
    serializerClasses.find(ClassUtils.isAssignable(clazz, _)).getOrElse(clazz)
  def getRPCProduct: RPCProduct = rpcProduct
} 
Example 28
Source File: RPCFormatsTest.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc

import java.lang.reflect.ParameterizedType
import java.util

import com.webank.wedatasphere.linkis.rpc.transform.{JavaCollectionSerializer, JavaMapSerializer}
import org.apache.commons.lang.ClassUtils
import org.json4s.JsonAST.JObject
import org.json4s.JsonDSL._
import org.json4s.jackson.Serialization
import org.json4s.reflect.ManifestFactory
import org.json4s.{CustomSerializer, DefaultFormats, Extraction}

object RPCFormatsTest {

  trait ResultResource
  class AvailableResource(val ticketId: String) extends ResultResource

  object ResultResourceSerializer extends CustomSerializer[ResultResource](implicit formats => ({
    case JObject(List(("AvailableResource", JObject(List(("ticketId", ticketId)))))) => new AvailableResource(ticketId.extract[String])
  },{
    case r: AvailableResource => ("AvailableResource", ("ticketId", Extraction.decompose(r.ticketId)))
  }))

  def testRPC1(args: Array[String]): Unit = {
    implicit val formats = DefaultFormats + ResultResourceSerializer
    val serializerClasses = formats.customSerializers.map(_.getClass.getGenericSuperclass match {
      case p: ParameterizedType =>
        val params = p.getActualTypeArguments
        if(params == null || params.isEmpty) null
        else params(0).asInstanceOf[Class[_]]
    }).filter(_ != null)
    val a = new AvailableResource("aaa")
    val str = Serialization.write(a)
    println(str)
    val clazz = classOf[AvailableResource]
    println(serializerClasses)
    val realClass1 = serializerClasses.find(ClassUtils.isAssignable(clazz, _))
    println(realClass1)
    val realClass = realClass1.getOrElse(clazz)
    val obj = Serialization.read(str)(formats, ManifestFactory.manifestOf(realClass))
    println(obj)
    println(classOf[Array[_]].getClass.getName)
  }

  case class TestCollection1(a: String, list: java.util.List[String])
  case class TestCollection2(a: String, list: java.util.Map[String, Integer])

  def testRPC2(args: Array[String]): Unit = {
    implicit val formats = DefaultFormats + JavaCollectionSerializer + JavaMapSerializer
    //    val a = TestCollection1("1", new util.ArrayList[String]())
    val a = TestCollection2("1", new util.HashMap[String, Integer]())
    //    a.list.add("1111")
    a.list.put("1111", 2)
    val str = Serialization.write(a)
    println(str)
    val realClass = classOf[TestCollection2]
    val obj = Serialization.read(str)(formats, ManifestFactory.manifestOf(realClass))
    println(obj)
  }

  def main(args: Array[String]): Unit = {
    testRPC2(args)
  }
} 
Example 29
Source File: ApiHandler.scala    From avoin-voitto   with MIT License
package liigavoitto.api

import akka.actor.ActorSystem
import liigavoitto.fetch.{ScoresFetcher, ScoresFromFilesFetcher}
import liigavoitto.journalist.LiigaJournalist
import liigavoitto.scores.ScoresApiClient
import liigavoitto.util.DateTimeNoMillisSerializer
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.write

class ApiHandler(implicit val system: ActorSystem) {
  implicit val formats = Serialization.formats(NoTypeHints) + DateTimeNoMillisSerializer

  val api = new ScoresApiClient()

  def report(matchId: String, lang: String) = {
    val fetcher = new ScoresFetcher(matchId, api)
    val article = LiigaJournalist.createArticle(fetcher.getEnrichedMatchData, lang)
    write(article)
  }

  def localReport(matchId: String, lang: String) = {
    val fetcher = new ScoresFromFilesFetcher(matchId)
    val article = LiigaJournalist.createArticle(fetcher.getEnrichedMatchData, lang)
    write(article)
  }
} 
Example 30
Source File: Slack.scala    From amadou   with Apache License 2.0
package com.mediative.amadou

import org.apache.http.client.methods.HttpPost
import org.apache.http.entity.{ContentType, StringEntity}
import org.apache.http.impl.client.HttpClients
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.{write}

object Slack {
  case class PostException(msg: String) extends RuntimeException(msg)

  case class Payload(
      channel: String,
      text: String,
      username: String,
      icon_emoji: String,
      link_names: Boolean)
}


  def post(message: String, icon: String = this.icon): Unit = {
    val payload = Payload(channel, message, user, icon, true)
    logger.info(s"Posting $payload to $url")

    val client        = HttpClients.createDefault()
    val requestEntity = new StringEntity(write(payload), ContentType.APPLICATION_JSON)
    val postMethod    = new HttpPost(url)
    postMethod.setEntity(requestEntity)

    val response = client.execute(postMethod)
    client.close()
    val status = response.getStatusLine
    if (status.getStatusCode != 200)
      throw PostException(
        s"$url replied with status ${status.getStatusCode}: ${status.getReasonPhrase}")
  }
} 
Example 31
Source File: RedisSourceOffset.scala    From spark-redis   with BSD 3-Clause "New" or "Revised" License
package org.apache.spark.sql.redis.stream

import com.redislabs.provider.redis.util.JsonUtils
import org.apache.spark.sql.execution.streaming.{Offset, SerializedOffset}
import org.json4s.jackson.Serialization
import org.json4s.{Formats, NoTypeHints}


case class RedisSourceOffset(offsets: Map[String, RedisConsumerOffset]) extends Offset {

  override def json(): String = JsonUtils.toJson(this)
}

object RedisSourceOffset {

  private implicit val formats: Formats = Serialization.formats(NoTypeHints)

  def fromOffset(offset: Offset): RedisSourceOffset = {
    offset match {
      case o: RedisSourceOffset => o
      case so: SerializedOffset => fromJson(so.json)
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid conversion from offset of ${offset.getClass} to RedisSourceOffset")
    }
  }

  def fromJson(json: String): RedisSourceOffset = {
    try {
      Serialization.read[RedisSourceOffset](json)
    } catch {
      case e: Throwable =>
        val example = RedisSourceOffset(Map("my-stream" -> RedisConsumerOffset("redis-source", "1543674099961-0")))
        val jsonExample = Serialization.write(example)
        throw new RuntimeException(s"Unable to parse offset json. Example of valid json: $jsonExample", e)
    }
  }
}

case class RedisConsumerOffset(groupName: String, offset: String)

case class RedisSourceOffsetRange(start: Option[String], end: String, config: RedisConsumerConfig) 
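As a quick usage sketch (assuming the classes above are visible on the classpath), the offset JSON written and read by RedisSourceOffset looks like this:

import org.apache.spark.sql.redis.stream.{RedisConsumerOffset, RedisSourceOffset}
import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.Serialization

object RedisSourceOffsetSketch extends App {
  implicit val formats: Formats = Serialization.formats(NoTypeHints)

  val example = RedisSourceOffset(Map("my-stream" -> RedisConsumerOffset("redis-source", "1543674099961-0")))

  // Prints: {"offsets":{"my-stream":{"groupName":"redis-source","offset":"1543674099961-0"}}}
  val json = Serialization.write(example)
  println(json)

  // Parsing the same JSON back yields an equal offset.
  assert(RedisSourceOffset.fromJson(json) == example)
}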
Example 32
Source File: DataSourceUtils.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources

import org.apache.hadoop.fs.Path
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types._


object DataSourceUtils {

  
  private def verifySchema(format: FileFormat, schema: StructType, isReadPath: Boolean): Unit = {
    schema.foreach { field =>
      if (!format.supportDataType(field.dataType, isReadPath)) {
        throw new AnalysisException(
          s"$format data source does not support ${field.dataType.catalogString} data type.")
      }
    }
  }

  // SPARK-24626: Metadata files and temporary files should not be
  // counted as data files, so that they shouldn't participate in tasks like
  // location size calculation.
  private[sql] def isDataPath(path: Path): Boolean = {
    val name = path.getName
    !(name.startsWith("_") || name.startsWith("."))
  }
} 
Example 33
Source File: FileStreamSourceOffset.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import scala.util.control.Exception._

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


case class FileStreamSourceOffset(logOffset: Long) extends Offset {
  override def json: String = {
    Serialization.write(this)(FileStreamSourceOffset.format)
  }
}

object FileStreamSourceOffset {
  implicit val format = Serialization.formats(NoTypeHints)

  def apply(offset: Offset): FileStreamSourceOffset = {
    offset match {
      case f: FileStreamSourceOffset => f
      case SerializedOffset(str) =>
        catching(classOf[NumberFormatException]).opt {
          FileStreamSourceOffset(str.toLong)
        }.getOrElse {
          Serialization.read[FileStreamSourceOffset](str)
        }
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid conversion from offset of ${offset.getClass} to FileStreamSourceOffset")
    }
  }
} 
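A small round-trip sketch of the JSON form used above (assuming FileStreamSourceOffset from this example is accessible):

import org.apache.spark.sql.execution.streaming.FileStreamSourceOffset
import org.json4s.jackson.Serialization

object FileStreamSourceOffsetSketch extends App {
  implicit val formats = FileStreamSourceOffset.format

  // Prints: {"logOffset":5}
  println(FileStreamSourceOffset(5).json)

  // The JSON form parses back to the same offset; a bare "5" would instead be handled
  // by the NumberFormatException fallback in FileStreamSourceOffset.apply above.
  println(Serialization.read[FileStreamSourceOffset]("""{"logOffset":5}""")) // FileStreamSourceOffset(5)
}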
Example 34
Source File: FileStreamSinkLog.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.net.URI

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf


class FileStreamSinkLog(
    metadataLogVersion: Int,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val defaultCompactInterval =
    sparkSession.sessionState.conf.fileSinkLogCompactInterval

  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $defaultCompactInterval) " +
      "to a positive value.")

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = 1
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
} 
Example 35
Source File: JsonUtils.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.kafka010

import java.io.Writer

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


// Enclosing object and its implicit formats (dropped in extraction) reinstated so the
// write call below compiles.
private object JsonUtils {
  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    partitionOffsets.foreach { case (tp, off) =>
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
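The nested-map encoding relies on json4s turning map keys into JSON object keys; a minimal standalone sketch of the same write call (topic name and offsets are illustrative):

import scala.collection.mutable.HashMap

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

object PartitionOffsetsSketch extends App {
  implicit val formats = Serialization.formats(NoTypeHints)

  val result = new HashMap[String, HashMap[Int, Long]]()
  result += "topicA" -> HashMap(0 -> 23L, 1 -> -1L)

  // Prints something like: {"topicA":{"0":23,"1":-1}} -- the Int partition ids become
  // JSON object keys (strings), which is exactly the shape partitionOffsets relies on.
  println(Serialization.write(result))
}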
Example 36
Source File: CommitLog.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.io.{InputStream, OutputStream}
import java.nio.charset.StandardCharsets._

import scala.io.{Source => IOSource}

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.sql.SparkSession


class CommitLog(sparkSession: SparkSession, path: String)
  extends HDFSMetadataLog[CommitMetadata](sparkSession, path) {

  import CommitLog._

  override protected def deserialize(in: InputStream): CommitMetadata = {
    // called inside a try-finally where the underlying stream is closed in the caller
    val lines = IOSource.fromInputStream(in, UTF_8.name()).getLines()
    if (!lines.hasNext) {
      throw new IllegalStateException("Incomplete log file in the offset commit log")
    }
    parseVersion(lines.next.trim, VERSION)
    val metadataJson = if (lines.hasNext) lines.next else EMPTY_JSON
    CommitMetadata(metadataJson)
  }

  override protected def serialize(metadata: CommitMetadata, out: OutputStream): Unit = {
    // called inside a try-finally where the underlying stream is closed in the caller
    out.write(s"v${VERSION}".getBytes(UTF_8))
    out.write('\n')

    // write metadata
    out.write(metadata.json.getBytes(UTF_8))
  }
}

object CommitLog {
  private val VERSION = 1
  private val EMPTY_JSON = "{}"
}


case class CommitMetadata(nextBatchWatermarkMs: Long = 0) {
  def json: String = Serialization.write(this)(CommitMetadata.format)
}

object CommitMetadata {
  implicit val format = Serialization.formats(NoTypeHints)

  def apply(json: String): CommitMetadata = Serialization.read[CommitMetadata](json)
} 
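CommitMetadata itself round-trips through json4s as a one-field JSON object; a quick sketch (assuming the class above is accessible):

import org.apache.spark.sql.execution.streaming.CommitMetadata

object CommitMetadataSketch extends App {
  // Prints: {"nextBatchWatermarkMs":0}
  println(CommitMetadata().json)

  // The String-based apply in the companion parses it back.
  println(CommitMetadata("""{"nextBatchWatermarkMs":42}""")) // CommitMetadata(42)
}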
Example 37
Source File: StreamStreamDataGenerator.scala    From structured-streaming-application   with Apache License 2.0 5 votes vote down vote up
package knolx.kafka

import java.util.Properties

import akka.actor.ActorSystem
import knolx.Config._
import knolx.KnolXLogger
import knolx.spark.Stock
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.write

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.util.Random


object StreamStreamDataGenerator extends App with KnolXLogger {
  val system = ActorSystem("DataStreamer")
  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer)
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

  val producer = new KafkaProducer[String, String](props)

  val companyNames = List("kirloskar", "bajaj", "amul", "dlf", "ebay")
  val orderTypes = List("buy", "sell")
  val numberOfSharesList = List(1, 2, 3, 4, 5, 6, 7, 8, 9)
  val randomCompanyNames = Random.shuffle(companyNames).drop(Random.shuffle((1 to 3).toList).head)

  implicit val formats = Serialization.formats(NoTypeHints)

  info("Streaming companies listed into Kafka...")
  system.scheduler.schedule(0 seconds, 20 seconds) {
    randomCompanyNames.foreach { name =>
      producer.send(new ProducerRecord[String, String](companiesTopic, name))
    }
  }

  info("Streaming stocks data into Kafka...")
  system.scheduler.schedule(0 seconds, 5 seconds) {
    companyNames.foreach { name =>
      val stock = Stock(name, Random.shuffle(numberOfSharesList).head, Random.shuffle(orderTypes).head)
      producer.send(new ProducerRecord[String, String](stocksTopic, write(stock)))
    }
  }
} 
Example 38
Source File: StreamStaticDataGenerator.scala    From structured-streaming-application   with Apache License 2.0 5 votes vote down vote up
package knolx.kafka

import java.util.Properties

import akka.actor.ActorSystem
import knolx.Config.{bootstrapServer, topic}
import knolx.KnolXLogger
import knolx.spark.Stock
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.write

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.util.Random


object StreamStaticDataGenerator extends App with KnolXLogger {
  val system = ActorSystem("DataStreamer")
  val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer)
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

  val producer = new KafkaProducer[String, String](props)

  val companyNames = List("kirloskar", "bajaj", "amul", "dlf", "ebay")
  val orderTypes = List("buy", "sell")
  val numberOfSharesList = List(1, 2, 3, 4, 5, 6, 7, 8, 9)

  implicit val formats = Serialization.formats(NoTypeHints)
  info("Streaming data into Kafka...")
  system.scheduler.schedule(0 seconds, 5 seconds) {
    companyNames.foreach { name =>
      val stock = Stock(name, Random.shuffle(numberOfSharesList).head, Random.shuffle(orderTypes).head)
      producer.send(new ProducerRecord[String, String](topic, write(stock)))
    }
  }
} 
Example 39
Source File: package.scala    From azure-event-hubs-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import java.time.Duration

import com.microsoft.azure.eventhubs.{ EventHubClient, EventHubClientOptions, PartitionReceiver }
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


package object eventhubs {

  implicit val formats = Serialization.formats(NoTypeHints)

  val StartOfStream: String = "-1"
  val EndOfStream: String = "@latest"
  val DefaultEventPosition: EventPosition = EventPosition.fromEndOfStream
  val DefaultEndingPosition: EventPosition = EventPosition.fromEndOfStream
  val DefaultMaxRatePerPartition: Rate = 1000
  val DefaultReceiverTimeout: Duration = Duration.ofSeconds(60)
  val DefaultMaxSilentTime: Duration = EventHubClientOptions.SILENT_OFF
  val MinSilentTime: Duration = EventHubClientOptions.SILENT_MINIMUM
  val DefaultOperationTimeout: Duration = Duration.ofSeconds(300)
  val DefaultConsumerGroup: String = EventHubClient.DEFAULT_CONSUMER_GROUP_NAME
  val PrefetchCountMinimum: Int = PartitionReceiver.MINIMUM_PREFETCH_COUNT
  val PrefetchCountMaximum: Int = PartitionReceiver.MAXIMUM_PREFETCH_COUNT
  val DefaultPrefetchCount: Int = PartitionReceiver.DEFAULT_PREFETCH_COUNT
  val DefaultFailOnDataLoss = "true"
  val DefaultUseSimulatedClient = "false"
  val DefaultPartitionPreferredLocationStrategy = "Hash"
  val DefaultUseExclusiveReceiver = "true"
  val StartingSequenceNumber = 0L
  val DefaultThreadPoolSize = 16
  val DefaultEpoch = 0L
  val RetryCount = 10
  val WaitInterval = 5000

  val OffsetAnnotation = "x-opt-offset"
  val EnqueuedTimeAnnotation = "x-opt-enqueued-time"
  val SequenceNumberAnnotation = "x-opt-sequence-number"

  val SparkConnectorVersion = "2.3.16"

  type PartitionId = Int
  val PartitionId: Int.type = Int

  type Rate = Int
  val Rate: Int.type = Int

  type Offset = Long
  val Offset: Long.type = Long

  type EnqueueTime = Long
  val EnqueueTime: Long.type = Long

  type SequenceNumber = Long
  val SequenceNumber: Long.type = Long

  object PartitionPreferredLocationStrategy extends Enumeration {
    type PartitionPreferredLocationStrategy = Value
    val Hash, BalancedHash = Value
  }

  // Allow Strings to be converted to types defined in this library.
  implicit class EventHubsString(val str: String) extends AnyVal {
    def toPartitionId: PartitionId = str.toInt

    def toRate: Rate = str.toInt

    def toOffset: Offset = str.toLong

    def toEnqueueTime: EnqueueTime = str.toLong

    def toSequenceNumber: SequenceNumber = str.toLong
  }
} 
Example 40
Source File: JsonUtils.scala    From azure-event-hubs-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.eventhubs

import org.apache.spark.eventhubs.{ NameAndPartition, _ }
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import scala.collection.mutable
import scala.util.control.NonFatal


// Enclosing object (dropped in extraction) reinstated; the implicit formats come from
// the org.apache.spark.eventhubs package object imported above.
private object JsonUtils {

  def partitionSeqNos(jsonStr: String): Map[NameAndPartition, SequenceNumber] = {
    try {
      Serialization.read[Map[String, Map[PartitionId, SequenceNumber]]](jsonStr).flatMap {
        case (name, partSeqNos) =>
          partSeqNos.map {
            case (part, seqNo) =>
              NameAndPartition(name, part) -> seqNo
          }
      }
    } catch {
      case NonFatal(_) =>
        throw new IllegalArgumentException(
          s"failed to parse $jsonStr" +
            s"""Expected e.g. {"ehName":{"0":23,"1":-1},"ehNameB":{"0":-2}}""")
    }
  }
} 
Example 41
Source File: JsonUtils.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.kafka010

import scala.collection.mutable.HashMap
import scala.util.control.NonFatal

import org.apache.kafka.common.TopicPartition
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


// Enclosing object and its implicit formats (dropped in extraction) reinstated so the
// write call below compiles.
private object JsonUtils {
  private implicit val formats = Serialization.formats(NoTypeHints)

  def partitionOffsets(partitionOffsets: Map[TopicPartition, Long]): String = {
    val result = new HashMap[String, HashMap[Int, Long]]()
    implicit val ordering = new Ordering[TopicPartition] {
      override def compare(x: TopicPartition, y: TopicPartition): Int = {
        Ordering.Tuple2[String, Int].compare((x.topic, x.partition), (y.topic, y.partition))
      }
    }
    val partitions = partitionOffsets.keySet.toSeq.sorted  // sort for more determinism
    partitions.foreach { tp =>
        val off = partitionOffsets(tp)
        val parts = result.getOrElse(tp.topic, new HashMap[Int, Long])
        parts += tp.partition -> off
        result += tp.topic -> parts
    }
    Serialization.write(result)
  }
} 
Example 42
Source File: FileStreamSourceOffset.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import scala.util.control.Exception._

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization


case class FileStreamSourceOffset(logOffset: Long) extends Offset {
  override def json: String = {
    Serialization.write(this)(FileStreamSourceOffset.format)
  }
}

object FileStreamSourceOffset {
  implicit val format = Serialization.formats(NoTypeHints)

  def apply(offset: Offset): FileStreamSourceOffset = {
    offset match {
      case f: FileStreamSourceOffset => f
      case SerializedOffset(str) =>
        catching(classOf[NumberFormatException]).opt {
          FileStreamSourceOffset(str.toLong)
        }.getOrElse {
          Serialization.read[FileStreamSourceOffset](str)
        }
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid conversion from offset of ${offset.getClass} to FileStreamSourceOffset")
    }
  }
} 
Example 43
Source File: FileStreamSinkLog.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import org.apache.hadoop.fs.{FileStatus, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.{read, write}

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf


class FileStreamSinkLog(
    metadataLogVersion: String,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[SinkFileStatus](metadataLogVersion, sparkSession, path) {

  private implicit val formats = Serialization.formats(NoTypeHints)

  protected override val fileCleanupDelayMs = sparkSession.sessionState.conf.fileSinkLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSinkLogDeletion

  protected override val defaultCompactInterval =
    sparkSession.sessionState.conf.fileSinkLogCompactInterval

  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key} (was $defaultCompactInterval) " +
      "to a positive value.")

  override def compactLogs(logs: Seq[SinkFileStatus]): Seq[SinkFileStatus] = {
    val deletedFiles = logs.filter(_.action == FileStreamSinkLog.DELETE_ACTION).map(_.path).toSet
    if (deletedFiles.isEmpty) {
      logs
    } else {
      logs.filter(f => !deletedFiles.contains(f.path))
    }
  }
}

object FileStreamSinkLog {
  val VERSION = "v1"
  val DELETE_ACTION = "delete"
  val ADD_ACTION = "add"
} 
Example 44
Source File: StreamMetadata.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import scala.util.control.NonFatal

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, FSDataOutputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

import org.apache.spark.internal.Logging
import org.apache.spark.sql.streaming.StreamingQuery


// Enclosing case class and companion object (dropped in extraction) reinstated so the
// implicit format and logError are in scope.
case class StreamMetadata(id: String) {
  def json: String = Serialization.write(this)(StreamMetadata.format)
}

object StreamMetadata extends Logging {
  implicit val format = Serialization.formats(NoTypeHints)

  def write(
      metadata: StreamMetadata,
      metadataFile: Path,
      hadoopConf: Configuration): Unit = {
    var output: FSDataOutputStream = null
    try {
      val fs = FileSystem.get(hadoopConf)
      output = fs.create(metadataFile)
      val writer = new OutputStreamWriter(output)
      Serialization.write(metadata, writer)
      writer.close()
    } catch {
      case NonFatal(e) =>
        logError(s"Error writing stream metadata $metadata to $metadataFile", e)
        throw e
    } finally {
      IOUtils.closeQuietly(output)
    }
  }
} 
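The overload used above writes straight to a java.io.Writer instead of returning a String; a self-contained sketch of that json4s call (the case class here is just a hypothetical stand-in for StreamMetadata):

import java.io.StringWriter

import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

object WriterSketch extends App {
  implicit val formats = Serialization.formats(NoTypeHints)

  // Hypothetical stand-in; any serializable case class works the same way.
  case class Meta(id: String)

  val writer = new StringWriter()
  Serialization.write(Meta("query-1"), writer)
  writer.close()

  // Prints: {"id":"query-1"}
  println(writer.toString)
}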
Example 45
Source File: TagProcessor.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut.gherkin.processors

import org.json4s.jackson.Serialization
import org.json4s.{Formats, NoTypeHints, jackson}
import report.donut.gherkin.model._

case class ReportTag(tag: String,
                     scenarios: List[Scenario],
                     scenariosMetrics: Metrics,
                     tagStatus: String,
                     htmlElements: String = "")

case class TagMetricsForChart(tag: String, scenariosMetrics: Metrics)

object TagProcessor {

  def apply(features: List[Feature]): (List[ReportTag], String) = {

    val allReportTags = createAllReportTags(features)

    (allReportTags, createChart(allReportTags))
  }

  private[processors] def createChart(reportTags: List[ReportTag]): String = {
    implicit def json4sJacksonFormats: Formats = jackson.Serialization.formats(NoTypeHints)

    Serialization.writePretty(reportTags.map(t => new TagMetricsForChart(t.tag, t.scenariosMetrics)))
  }

  private[processors] def createAllReportTags(features: List[Feature]): List[ReportTag] = {
    val scenarios: List[Scenario] = features.flatMap(f => addFeatureTagsToScenarios(f.scenariosExcludeBackgroundAndUnitTests, f.tags))
    groupElementsByTag(scenarios)
      .map { case (tag, scenarioList) => new ReportTag(tag, scenarioList, ScenarioMetrics(scenarioList), tagStatus(scenarioList)) }.toList
      .zipWithIndex.map { case (t, i) => t.copy(htmlElements = HTMLTagsProcessor(t.scenarios, i.toString.trim)) }
      .sortWith((left, right) => left.scenariosMetrics.total > right.scenariosMetrics.total)
  }

  // tagName -> List[Elements], excluding background elements
  private[processors] def groupElementsByTag(scenarios: List[Scenario]): Map[String, List[Scenario]] =
    scenarios.flatMap(s => s.tags.map(tag => (tag, s))).groupBy(_._1).mapValues(value => value.map(_._2))

  // cucumber 1 backwards compatibility - adds the parent (feature) tag to all children (scenarios)
  private[processors] def addFeatureTagsToScenarios(scenarios: List[Scenario], featureTags: List[String]): List[Scenario] =
    scenarios.map(e => e.copy(tags = (e.tags ::: featureTags).distinct))

  // Returns `passed` or `failed`
  private[processors] def tagStatus(scenarios: List[Scenario]): String = {
    val statuses = scenarios.map(s => s.status.statusStr)
    if (statuses.contains(Status.FAILED)) Status.FAILED else Status.PASSED
  }
}
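The chart JSON above uses Serialization.writePretty, which differs from write only in producing indented output; a small illustration with a hypothetical stand-in case class (the real TagMetricsForChart depends on the report model's Metrics type):

import org.json4s.{Formats, NoTypeHints, jackson}
import org.json4s.jackson.Serialization

object WritePrettySketch extends App {
  implicit val formats: Formats = jackson.Serialization.formats(NoTypeHints)

  // Hypothetical stand-in for TagMetricsForChart.
  case class TagCount(tag: String, total: Int)

  val tags = List(TagCount("@smoke", 12), TagCount("@regression", 3))

  // write produces compact JSON: [{"tag":"@smoke","total":12},{"tag":"@regression","total":3}]
  println(Serialization.write(tags))

  // writePretty produces the same JSON, indented over multiple lines.
  println(Serialization.writePretty(tags))
}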