javax.ws.rs.Produces Scala Examples

The following examples show how to use javax.ws.rs.Produces in Scala. Each example notes the project it was taken from and its original source file.
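Before the project examples, a minimal sketch of the basic pattern may help: @Produces takes an Array of media-type strings, so a resource method can declare one or more response types. The resource below is hypothetical (its path, class name, and payload are invented for illustration) and only shows how the annotation is applied; it is not taken from any of the projects listed.

import javax.ws.rs.{GET, Path, Produces}
import javax.ws.rs.core.MediaType

// Hypothetical JAX-RS resource: @Produces declares which media types this method
// can produce; the runtime negotiates between them based on the Accept header.
@Path("/status")
class StatusResource {

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN))
  def status(): String = """{"status": "ok"}"""
}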
Example 1
Source File: JacksonMessageWriter.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.{Calendar, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes(StandardCharsets.UTF_8))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'")
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
Example 2
Source File: JacksonMessageWriter.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.text.SimpleDateFormat
import java.util.{Calendar, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes("utf-8"))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'")
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
Example 3
Source File: OneJobResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 4
Source File: ExecutorListResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    val storageStatusList = listener.storageStatusList
    (0 until storageStatusList.size).map { statusId =>
      ExecutorsPage.getExecInfo(listener, statusId)
    }
  }
} 
Example 5
Source File: JacksonMessageWriter.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.text.SimpleDateFormat
import java.util.{Calendar, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes("utf-8"))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'")
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
Example 6
Source File: AllRDDResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 7
Source File: EventLogDownloadResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }
      // Implement the StreamingOutput interface
      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          // Use a ZipOutputStream to package the event logs into a zip archive
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 8
Source File: ApplicationListResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        // A Range, collection, or array cannot be passed directly as varargs; it must be expanded with : _*
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempts fall in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 9
Source File: OneRDDResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
// The @Produces annotation declares the media type(s) of the method's response entity; one or more types may be listed
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneRDDResource(ui: SparkUI) {

  @GET
  def rddData(@PathParam("rddId") rddId: Int): RDDStorageInfo = {
    AllRDDResource.getRDDStorageInfo(rddId, ui.storageListener, true).getOrElse(
      throw new NotFoundException(s"no rdd found w/ id $rddId")
    )
  }

} 
Example 10
Source File: OneJobResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData
// The @Produces annotation declares the media type(s) of the method's response entity; one or more types may be listed
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 11
Source File: ExecutorListResource.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.storageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId)
      }
    }
  }
} 
Example 12
Source File: JacksonMessageWriter.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.{Calendar, Locale, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes(StandardCharsets.UTF_8))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'", Locale.US)
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
Example 13
Source File: ApplicationListResource.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource extends ApiRequestContext {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @DefaultValue("2010-01-01") @QueryParam("minEndDate") minEndDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxEndDate") maxEndDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        isAttemptInRange(attempt, minDate, maxDate, minEndDate, maxEndDate, anyRunning)
      }
    }.take(numApps)
  }

  private def isAttemptInRange(
      attempt: ApplicationAttemptInfo,
      minStartDate: SimpleDateParam,
      maxStartDate: SimpleDateParam,
      minEndDate: SimpleDateParam,
      maxEndDate: SimpleDateParam,
      anyRunning: Boolean): Boolean = {
    val startTimeOk = attempt.startTime.getTime >= minStartDate.timestamp &&
      attempt.startTime.getTime <= maxStartDate.timestamp
    // If the maxEndDate is in the past, exclude all running apps.
    val endTimeOkForRunning = anyRunning && (maxEndDate.timestamp > System.currentTimeMillis())
    val endTimeOkForCompleted = !anyRunning && (attempt.endTime.getTime >= minEndDate.timestamp &&
      attempt.endTime.getTime <= maxEndDate.timestamp)
    val endTimeOk = endTimeOkForRunning || endTimeOkForCompleted
    startTimeOk && endTimeOk
  }
} 
Example 14
Source File: AllRDDResource.scala    From iolap   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 15
Source File: AllRDDResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.storageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.storageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.get(blockId).getOrElse(Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 16
Source File: EventLogDownloadResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 17
Source File: ApplicationListResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempts fall in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      coresGranted = Some(internal.coresGranted),
      maxCores = internal.desc.maxCores,
      coresPerExecutor = internal.desc.coresPerExecutor,
      memoryPerExecutorMB = Some(internal.desc.memoryPerExecutorMB),
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 18
Source File: OneJobResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{PathParam, GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.map {_._2} .flatten.find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 19
Source File: ExecutorListResource.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors will be
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.storageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId)
      }
    }
  }
} 
Example 20
Source File: EchoListService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.echolist

import akka.http.scaladsl.server.{Directives, Route}
import com.example.akka.DefaultJsonFormats
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
import javax.ws.rs.core.MediaType
import javax.ws.rs.{Consumes, POST, Path, Produces}
import pl.iterators.kebs.json.KebsSpray
import spray.json.RootJsonFormat

@Path("/echolist")
object EchoListService extends Directives with DefaultJsonFormats with KebsSpray {

  case class EchoList(listName: String, values: Seq[String])

  implicit val echoListFormat: RootJsonFormat[EchoList] = jsonFormatN[EchoList]

  val route: Route = echo

  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Echo List", description = "Echo List",
    requestBody = new RequestBody(content = Array(new Content(schema = new Schema(implementation = classOf[EchoList])))),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Echo List",
        content = Array(new Content(schema = new Schema(implementation = classOf[EchoList])))),
      new ApiResponse(responseCode = "400", description = "Bad Request"))
  )
  def echo: Route =
    path("echolist") {
      post {
        entity(as[EchoList]) { request =>
          complete(request)
        }
      }
    }

} 
Example 21
Source File: AddService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.add

import javax.ws.rs.{Consumes, POST, Path, Produces}
import akka.actor.ActorRef
import akka.http.scaladsl.server.{Directives, Route}
import akka.pattern.ask
import akka.util.Timeout
import com.example.akka.DefaultJsonFormats
import com.example.akka.add.AddActor._
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
import javax.ws.rs.core.MediaType
import spray.json.RootJsonFormat

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

@Path("/add")
class AddService(addActor: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with DefaultJsonFormats {

  implicit val timeout: Timeout = Timeout(2.seconds)

  implicit val requestFormat: RootJsonFormat[AddRequest] = jsonFormat1(AddRequest)
  implicit val responseFormat: RootJsonFormat[AddResponse] = jsonFormat1(AddResponse)

  val route: Route = add

  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Add integers", description = "Add integers",
    requestBody = new RequestBody(content = Array(new Content(schema = new Schema(implementation = classOf[AddRequest])))),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Add response",
        content = Array(new Content(schema = new Schema(implementation = classOf[AddResponse])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def add: Route =
    path("add") {
      post {
        entity(as[AddRequest]) { request =>
          complete { (addActor ? request).mapTo[AddResponse] }
        }
      }
    }

} 
Example 22
Source File: EchoEnumeratumService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.echoenumeratum

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{Directives, Route}
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
import javax.ws.rs.core.MediaType
import javax.ws.rs.{Consumes, POST, Path, Produces}
import pl.iterators.kebs.json.{KebsEnumFormats, KebsSpray}
import spray.json.{DefaultJsonProtocol, RootJsonFormat}

@Path("/echoenumeratum")
object EchoEnumeratumService extends Directives with SprayJsonSupport with DefaultJsonProtocol
  with KebsSpray with KebsEnumFormats {

  case class EchoEnumeratum(enumValue: SizeEnum)

  implicit val echoEnumeratumFormat: RootJsonFormat[EchoEnumeratum] = jsonFormatN[EchoEnumeratum]

  val route: Route = echo

  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Echo Enumeratum", description = "Echo Enumeratum",
    requestBody = new RequestBody(content = Array(new Content(schema = new Schema(implementation = classOf[EchoEnumeratum])))),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Echo Enumeratum",
        content = Array(new Content(schema = new Schema(implementation = classOf[EchoEnumeratum])))),
      new ApiResponse(responseCode = "400", description = "Bad Request"))
  )
  def echo: Route =
    path("echoenumeratum") {
      post {
        entity(as[EchoEnumeratum]) { request =>
          complete(request)
        }
      }
    }

} 
Example 23
Source File: AddOptionService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.addoption

import javax.ws.rs.{Consumes, POST, Path, Produces}
import akka.actor.ActorRef
import akka.http.scaladsl.server.{Directives, Route}
import akka.pattern.ask
import akka.util.Timeout
import com.example.akka.DefaultJsonFormats
import com.example.akka.addoption.AddOptionActor._
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
import javax.ws.rs.core.MediaType
import spray.json.RootJsonFormat

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

@Path("/addOption")
class AddOptionService(addActor: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with DefaultJsonFormats {

  implicit val timeout: Timeout = Timeout(2.seconds)

  implicit val requestFormat: RootJsonFormat[AddOptionRequest] = jsonFormat2(AddOptionRequest)
  implicit val responseFormat: RootJsonFormat[AddOptionResponse] = jsonFormat1(AddOptionResponse)

  val route: Route = addOption

  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Add integers", description = "Add integers",
    requestBody = new RequestBody(content = Array(new Content(schema = new Schema(implementation = classOf[AddOptionRequest])))),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Add response",
        content = Array(new Content(schema = new Schema(implementation = classOf[AddOptionResponse])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def addOption: Route =
    path("addOption") {
      post {
        entity(as[AddOptionRequest]) { request =>
          complete { (addActor ? request).mapTo[AddOptionResponse] }
        }
      }
    }

} 
Example 24
Source File: HelloService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.hello

import javax.ws.rs.{GET, Path, Produces}
import akka.actor.ActorRef
import akka.http.scaladsl.server.{Directives, Route}
import akka.pattern.ask
import akka.util.Timeout
import com.example.akka.DefaultJsonFormats
import com.example.akka.hello.HelloActor._
import io.swagger.v3.oas.annotations.enums.ParameterIn
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.responses.ApiResponse
import io.swagger.v3.oas.annotations.{Operation, Parameter}
import javax.ws.rs.core.MediaType
import spray.json.RootJsonFormat

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

@Path("/hello")
class HelloService(hello: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with DefaultJsonFormats {

  implicit val timeout: Timeout = Timeout(2.seconds)
  implicit val greetingFormat: RootJsonFormat[Greeting] = jsonFormat1(Greeting)

  val route: Route =
    getHello ~
    getHelloSegment

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Return Hello greeting (anonymous)", description = "Return Hello greeting for anonymous request",
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Hello response",
        content = Array(new Content(schema = new Schema(implementation = classOf[Greeting])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def getHello: Route =
    path("hello") {
      get {
        complete { (hello ? AnonymousHello).mapTo[Greeting] }
      }
    }

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Return Hello greeting", description = "Return Hello greeting for named user",
    parameters = Array(new Parameter(name = "name", in = ParameterIn.PATH, description = "user name")),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Hello response",
        content = Array(new Content(schema = new Schema(implementation = classOf[Greeting])))),
      new ApiResponse(responseCode = "500", description = "Internal server error"))
  )
  def getHelloSegment: Route =
    path("hello" / Segment) { name =>
      get {
        complete { (hello ? Hello(name)).mapTo[Greeting] }
      }
    }
} 
Example 25
Source File: EchoEnumService.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.echoenum

import akka.http.scaladsl.server.{Directives, Route}
import com.example.akka.DefaultJsonFormats
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala.JsonScalaEnumeration
import io.swagger.v3.oas.annotations.Operation
import io.swagger.v3.oas.annotations.media.{Content, Schema}
import io.swagger.v3.oas.annotations.parameters.RequestBody
import io.swagger.v3.oas.annotations.responses.ApiResponse
import javax.ws.rs.core.MediaType
import javax.ws.rs.{Consumes, POST, Path, Produces}
import spray.json.{DeserializationException, JsString, JsValue, RootJsonFormat}

@Path("/echoenum")
object EchoEnumService extends Directives with DefaultJsonFormats {

  //case class EchoEnum(@Schema(required = true, `type` = "string", allowableValues = Array("TALL", "GRANDE", "VENTI"))
  //                    enumValue: SizeEnum.Value)
  class SizeEnumTypeClass extends TypeReference[SizeEnum.type]
  case class EchoEnum(@JsonScalaEnumeration(classOf[SizeEnumTypeClass]) enumValue: SizeEnum.Value)

  implicit val enumFormat: RootJsonFormat[SizeEnum.Value] =
    new RootJsonFormat[SizeEnum.Value] {
      def write(obj: SizeEnum.Value): JsValue = JsString(obj.toString)
      def read(json: JsValue): SizeEnum.Value = {
        json match {
          case JsString(txt) => SizeEnum.withName(txt)
          case somethingElse => throw DeserializationException(s"Expected a value from enum $SizeEnum instead of $somethingElse")
        }
      }
    }
  implicit val echoEnumFormat: RootJsonFormat[EchoEnum] = jsonFormat1(EchoEnum)

  val route: Route = echo

  @POST
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Produces(Array(MediaType.APPLICATION_JSON))
  @Operation(summary = "Echo Enum", description = "Echo Enum",
    requestBody = new RequestBody(content = Array(new Content(schema = new Schema(implementation = classOf[EchoEnum])))),
    responses = Array(
      new ApiResponse(responseCode = "200", description = "Echo Enum",
        content = Array(new Content(schema = new Schema(implementation = classOf[EchoEnum])))),
      new ApiResponse(responseCode = "400", description = "Bad Request"))
  )
  def echo: Route =
    path("echoenum") {
      post {
        entity(as[EchoEnum]) { request =>
          complete(request)
        }
      }
    }

} 
Example 26
Source File: MappingProvider.scala    From orders-aws   with Apache License 2.0
package works.weave.socks.aws.orders.presentation

import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider
import javax.ws.rs.Produces
import javax.ws.rs.core.MediaType
import javax.ws.rs.ext.Provider
import works.weave.socks.aws.orders.ProjectDefaultJacksonMapper

@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
class MappingProvider extends JacksonJaxbJsonProvider {
  setMapper(MappingProvider.mapper)
}
object MappingProvider {
  val mapper = {
    val presentationMapper = ProjectDefaultJacksonMapper.build()
    // modify as necessary for presentation purpose
    presentationMapper
  }

} 
Example 27
Source File: ApplicationListResource.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempts fall in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
Example 28
Source File: AllRDDResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 29
Source File: EventLogDownloadResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 30
Source File: ApplicationListResource.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempt falls in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
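A hedged usage sketch: fetching the application list this resource serves, using the status, minDate and limit query parameters it declares. The host, port and the /api/v1/applications path are assumptions about where the resource is mounted.

import scala.io.Source

object ListCompletedApps {
  def main(args: Array[String]): Unit = {
    // Placeholder history-server address; the query string mirrors the @QueryParam names above.
    val url = "http://localhost:18080/api/v1/applications?status=completed&minDate=2015-02-10&limit=10"
    val json = Source.fromURL(url).mkString
    println(json)
  }
}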
Example 31
Source File: OneJobResource.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
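A hedged sketch of the failure path: when no job matches, the NotFoundException above is expected to reach the client as a 404. The UI address and ids below are placeholders.

import java.net.{HttpURLConnection, URL}

object CheckUnknownJob {
  def main(args: Array[String]): Unit = {
    // Placeholder application UI and an intentionally unknown job id.
    val url = new URL("http://localhost:4040/api/v1/applications/local-1590000000000/jobs/9999")
    val conn = url.openConnection().asInstanceOf[HttpURLConnection]
    println(s"HTTP status: ${conn.getResponseCode}") // expected: 404 for an unknown job
    conn.disconnect()
  }
}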
Example 32
Source File: AllExecutorListResource.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
Example 33
Source File: ExecutorListResource.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 34
Source File: Status.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.kb.http.endpoints

import java.time.LocalTime
import io.swagger.annotations.Api
import javax.ws.rs.Path
import javax.ws.rs.GET
import javax.ws.rs.Produces
import io.swagger.annotations.ApiOperation
import javax.ws.rs.core.MediaType
import org.slf4j.LoggerFactory
import javax.ws.rs.core.Context
import javax.ws.rs.core.UriInfo
import javax.ws.rs.core.Request
import it.almawave.linkeddata.kb.utils.JSONHelper
import java.time.LocalDateTime
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter
import java.util.Locale
import java.time.ZoneId

@Api(tags = Array("catalog"))
@Path("/status")
class Status {

  private val logger = LoggerFactory.getLogger(this.getClass)

  @Context
  var uriInfo: UriInfo = null // injected by the JAX-RS runtime

  @GET
  @Produces(Array(MediaType.APPLICATION_JSON))
  @ApiOperation(nickname = "status", value = "endpoint status")
  def status() = {

    val base_uri = uriInfo.getBaseUri
    val msg = s"the service is running at ${base_uri}"
    logger.info(msg)

    val _now = now()
    StatusMsg(_now._1, _now._2, msg)

  }

  def now() = {

    val zdt = ZonedDateTime.now(ZoneId.of("+1"))
    val dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSZ")

    (zdt.format(dtf), zdt)

  }

}

case class StatusMsg(
  now:      String,
  dateTime: ZonedDateTime,
  msg:      String
) 
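A hedged sketch of wiring this resource into an embedded server so the /status endpoint can be exercised locally. It assumes Jersey with the Grizzly connector is on the classpath; the port is a placeholder.

import java.net.URI
import org.glassfish.jersey.grizzly2.httpserver.GrizzlyHttpServerFactory
import org.glassfish.jersey.server.ResourceConfig
import it.almawave.kb.http.endpoints.Status

object StatusServer {
  def main(args: Array[String]): Unit = {
    // Register the Status resource class with a minimal Jersey application.
    val config = new ResourceConfig().register(classOf[Status])
    val server = GrizzlyHttpServerFactory.createHttpServer(URI.create("http://localhost:8080/"), config)
    println("GET http://localhost:8080/status")
    // Keep the server running until the process is stopped; call server.shutdownNow() to stop it.
    Thread.currentThread().join()
  }
}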
Example 35
Source File: JacksonScalaProvider.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.kb.http.providers

import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider
import com.fasterxml.jackson.databind.ObjectMapper
import javax.ws.rs.ext.Provider
import javax.ws.rs.Produces
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import javax.ws.rs.core.MediaType
import com.fasterxml.jackson.annotation.JsonInclude

import com.fasterxml.jackson.annotation.JsonAnyGetter

import com.fasterxml.jackson.databind.SerializationFeature
import com.fasterxml.jackson.databind.DeserializationFeature
import javax.ws.rs.ext.ContextResolver
import com.fasterxml.jackson.databind.JsonSerializer
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.SerializerProvider
import java.lang.Double
import java.lang.Boolean

@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
class JacksonScalaProvider extends JacksonJaxbJsonProvider with ContextResolver[ObjectMapper] {

  println("\n\nregistered " + this.getClass)

  val mapper = new ObjectMapper()

  mapper
    .registerModule(DefaultScalaModule)
    .setSerializationInclusion(JsonInclude.Include.ALWAYS)

    .configure(SerializationFeature.INDENT_OUTPUT, true)
    .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, true)

    .configure(SerializationFeature.WRITE_NULL_MAP_VALUES, true)
    .configure(SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED, true)
    .configure(SerializationFeature.WRITE_EMPTY_JSON_ARRAYS, true)

    .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
    .configure(DeserializationFeature.ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT, true)
    .configure(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT, true)
    .configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, true)
    //    .setVisibility(JsonMethod.FIELD, Visibility.ANY);

    .getSerializerProvider.setNullValueSerializer(new JsonSerializer[Object] {
      // Write a type-appropriate default instead of a JSON null.
      def serialize(obj: Object, gen: JsonGenerator, provider: SerializerProvider): Unit = {
        obj match {
          case _: Boolean => gen.writeBoolean(false)
          case _: Integer => gen.writeNumber(0)
          case _: Double  => gen.writeNumber(0.0D)
          case _: String  => gen.writeString("")
          case _          => gen.writeString("")
        }
      }
    })

  super.setMapper(mapper)

  override def getContext(klasses: Class[_]): ObjectMapper = mapper

} 
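A hedged sketch, separate from the provider itself: a mapper configured the same way (Scala module, Include.ALWAYS) to illustrate the null handling the provider builds on, before its custom null-value serializer substitutes type-appropriate defaults. The Doc case class is an invented example type.

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule

case class Doc(title: String, tags: Seq[String], note: String)

object NullHandlingDemo {
  def main(args: Array[String]): Unit = {
    val mapper = new ObjectMapper()
    mapper.registerModule(DefaultScalaModule)
    mapper.setSerializationInclusion(JsonInclude.Include.ALWAYS)
    // Include.ALWAYS keeps the null field in the output; the provider above would
    // additionally rewrite nulls as "" / 0 / false via its null-value serializer.
    println(mapper.writeValueAsString(Doc("catalog", Seq("kb"), null)))
  }
}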
Example 36
Source File: ExecutorNumResource.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Path, Produces}
import javax.ws.rs.core.MediaType

import scala.collection.JavaConverters._

import org.apache.spark.monitor.{ExecutorNum, ExecutorNumWrapper}

@Path("/v1")
@Produces(Array(MediaType.APPLICATION_JSON))
class ExecutorNumResource extends BaseAppResource {
  @GET
  @Path("applications/{appId}/executorNumCurve")
  def executorNumCurve1(): Seq[ExecutorNum] =
    withUI(_.store.store.view(classOf[ExecutorNumWrapper]).asScala.map(_.point).toSeq)

  @GET
  @Path("applications/{appId}/{attemptId}/executorNumCurve")
  def executorNumCurve2(): Seq[ExecutorNum] =
    withUI(_.store.store.view(classOf[ExecutorNumWrapper]).asScala.map(_.point).toSeq)

} 
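A hedged note on routing: the class-level @Path("/v1") prefixes each method-level template, so the two methods resolve to /v1/applications/{appId}/executorNumCurve and /v1/applications/{appId}/{attemptId}/executorNumCurve relative to the application root. The sketch below assumes the resource is mounted under /api, as in Spark's REST API; host and ids are placeholders.

import scala.io.Source

object FetchExecutorNumCurve {
  def main(args: Array[String]): Unit = {
    // Placeholder UI address and application id; the path mirrors the @Path templates above.
    val url = "http://localhost:4040/api/v1/applications/local-1590000000000/executorNumCurve"
    println(Source.fromURL(url).mkString)
  }
}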
Example 37
Source File: JacksonMessageWriter.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.{Calendar, Locale, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes(StandardCharsets.UTF_8))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'", Locale.US)
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
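A hedged sketch reproducing the writer's date format outside the private[spark] object, to show what serialized timestamps look like on the wire; note the literal 'GMT' suffix in the pattern rather than a real zone designator.

import java.text.SimpleDateFormat
import java.util.{Calendar, Date, Locale, SimpleTimeZone}

object DateFormatCheck {
  def main(args: Array[String]): Unit = {
    // Same pattern as makeISODateFormat above; the calendar is pinned to GMT.
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'", Locale.US)
    iso8601.setCalendar(Calendar.getInstance(new SimpleTimeZone(0, "GMT")))
    println(iso8601.format(new Date(0L))) // prints 1970-01-01T00:00:00.000GMT
  }
}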
Example 38
Source File: AllRDDResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
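A hedged usage sketch: the rddList endpoint above typically backs /api/v1/applications/{appId}/storage/rdd in the Spark UI, returning the summary view built with includeDetails = false. The address and application id below are placeholders.

import scala.io.Source

object FetchRddStorage {
  def main(args: Array[String]): Unit = {
    // Placeholder driver UI address and application id.
    val url = "http://localhost:4040/api/v1/applications/local-1590000000000/storage/rdd"
    println(Source.fromURL(url).mkString)
  }
}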
Example 39
Source File: EventLogDownloadResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 40
Source File: ApplicationListResource.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo
import org.apache.spark.deploy.master.{ApplicationInfo => InternalApplicationInfo}

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam)
  : Iterator[ApplicationInfo] = {
    val allApps = uiRoot.getApplicationInfoList
    val adjStatus = {
      if (status.isEmpty) {
        Arrays.asList(ApplicationStatus.values(): _*)
      } else {
        status
      }
    }
    val includeCompleted = adjStatus.contains(ApplicationStatus.COMPLETED)
    val includeRunning = adjStatus.contains(ApplicationStatus.RUNNING)
    allApps.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running
      val statusOk = (!anyRunning && includeCompleted) ||
        (anyRunning && includeRunning)
      // keep the app if *any* attempt falls in the right time window
      val dateOk = app.attempts.exists { attempt =>
        attempt.startTime.getTime >= minDate.timestamp &&
          attempt.startTime.getTime <= maxDate.timestamp
      }
      statusOk && dateOk
    }
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }

  def convertApplicationInfo(
      internal: InternalApplicationInfo,
      completed: Boolean): ApplicationInfo = {
    // standalone application info always has just one attempt
    new ApplicationInfo(
      id = internal.id,
      name = internal.desc.name,
      attempts = Seq(new ApplicationAttemptInfo(
        attemptId = None,
        startTime = new Date(internal.startTime),
        endTime = new Date(internal.endTime),
        sparkUser = internal.desc.user,
        completed = completed
      ))
    )
  }

} 
Example 41
Source File: OneJobResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 42
Source File: AllExecutorListResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
Example 43
Source File: ExecutorListResource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 44
Source File: JacksonMessageWriter.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.{Calendar, Locale, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes(StandardCharsets.UTF_8))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'", Locale.US)
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
} 
Example 45
Source File: AllRDDResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.storage.{RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.storage.StorageListener

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllRDDResource(ui: SparkUI) {

  @GET
  def rddList(): Seq[RDDStorageInfo] = {
    val storageStatusList = ui.storageListener.activeStorageStatusList
    val rddInfos = ui.storageListener.rddInfoList
    rddInfos.map{rddInfo =>
      AllRDDResource.getRDDStorageInfo(rddInfo.id, rddInfo, storageStatusList,
        includeDetails = false)
    }
  }

}

private[spark] object AllRDDResource {

  def getRDDStorageInfo(
      rddId: Int,
      listener: StorageListener,
      includeDetails: Boolean): Option[RDDStorageInfo] = {
    val storageStatusList = listener.activeStorageStatusList
    listener.rddInfoList.find { _.id == rddId }.map { rddInfo =>
      getRDDStorageInfo(rddId, rddInfo, storageStatusList, includeDetails)
    }
  }

  def getRDDStorageInfo(
      rddId: Int,
      rddInfo: RDDInfo,
      storageStatusList: Seq[StorageStatus],
      includeDetails: Boolean): RDDStorageInfo = {
    val workers = storageStatusList.map { (rddId, _) }
    val blockLocations = StorageUtils.getRddBlockLocations(rddId, storageStatusList)
    val blocks = storageStatusList
      .flatMap { _.rddBlocksById(rddId) }
      .sortWith { _._1.name < _._1.name }
      .map { case (blockId, status) =>
        (blockId, status, blockLocations.getOrElse(blockId, Seq[String]("Unknown")))
      }

    val dataDistribution = if (includeDetails) {
      Some(storageStatusList.map { status =>
        new RDDDataDistribution(
          address = status.blockManagerId.hostPort,
          memoryUsed = status.memUsedByRdd(rddId),
          memoryRemaining = status.memRemaining,
          diskUsed = status.diskUsedByRdd(rddId)
        ) } )
    } else {
      None
    }
    val partitions = if (includeDetails) {
      Some(blocks.map { case (id, block, locations) =>
        new RDDPartitionInfo(
          blockName = id.name,
          storageLevel = block.storageLevel.description,
          memoryUsed = block.memSize,
          diskUsed = block.diskSize,
          executors = locations
        )
      } )
    } else {
      None
    }

    new RDDStorageInfo(
      id = rddId,
      name = rddInfo.name,
      numPartitions = rddInfo.numPartitions,
      numCachedPartitions = rddInfo.numCachedPartitions,
      storageLevel = rddInfo.storageLevel.description,
      memoryUsed = rddInfo.memSize,
      diskUsed = rddInfo.diskSize,
      dataDistribution = dataDistribution,
      partitions = partitions
    )
  }
} 
Example 46
Source File: EventLogDownloadResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.util.zip.ZipOutputStream
import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.{MediaType, Response, StreamingOutput}

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging

@Produces(Array(MediaType.APPLICATION_OCTET_STREAM))
private[v1] class EventLogDownloadResource(
    val uIRoot: UIRoot,
    val appId: String,
    val attemptId: Option[String]) extends Logging {
  val conf = SparkHadoopUtil.get.newConfiguration(new SparkConf)

  @GET
  def getEventLogs(): Response = {
    try {
      val fileName = {
        attemptId match {
          case Some(id) => s"eventLogs-$appId-$id.zip"
          case None => s"eventLogs-$appId.zip"
        }
      }

      val stream = new StreamingOutput {
        override def write(output: OutputStream): Unit = {
          val zipStream = new ZipOutputStream(output)
          try {
            uIRoot.writeEventLogs(appId, attemptId, zipStream)
          } finally {
            zipStream.close()
          }

        }
      }

      Response.ok(stream)
        .header("Content-Disposition", s"attachment; filename=$fileName")
        .header("Content-Type", MediaType.APPLICATION_OCTET_STREAM)
        .build()
    } catch {
      case NonFatal(e) =>
        Response.serverError()
          .entity(s"Event logs are not available for app: $appId.")
          .status(Response.Status.SERVICE_UNAVAILABLE)
          .build()
    }
  }
} 
Example 47
Source File: ApplicationListResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.util.{Date, List => JList}
import javax.ws.rs.{DefaultValue, GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType

import org.apache.spark.deploy.history.ApplicationHistoryInfo

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ApplicationListResource(uiRoot: UIRoot) {

  @GET
  def appList(
      @QueryParam("status") status: JList[ApplicationStatus],
      @DefaultValue("2010-01-01") @QueryParam("minDate") minDate: SimpleDateParam,
      @DefaultValue("3000-01-01") @QueryParam("maxDate") maxDate: SimpleDateParam,
      @QueryParam("limit") limit: Integer)
  : Iterator[ApplicationInfo] = {

    val numApps = Option(limit).map(_.toInt).getOrElse(Integer.MAX_VALUE)
    val includeCompleted = status.isEmpty || status.contains(ApplicationStatus.COMPLETED)
    val includeRunning = status.isEmpty || status.contains(ApplicationStatus.RUNNING)

    uiRoot.getApplicationInfoList.filter { app =>
      val anyRunning = app.attempts.exists(!_.completed)
      // if any attempt is still running, we consider the app to also still be running;
      // keep the app if *any* attempt falls in the right time window
      ((!anyRunning && includeCompleted) || (anyRunning && includeRunning)) &&
      app.attempts.exists { attempt =>
        val start = attempt.startTime.getTime
        start >= minDate.timestamp && start <= maxDate.timestamp
      }
    }.take(numApps)
  }
}

private[spark] object ApplicationsListResource {
  def appHistoryInfoToPublicAppInfo(app: ApplicationHistoryInfo): ApplicationInfo = {
    new ApplicationInfo(
      id = app.id,
      name = app.name,
      coresGranted = None,
      maxCores = None,
      coresPerExecutor = None,
      memoryPerExecutorMB = None,
      attempts = app.attempts.map { internalAttemptInfo =>
        new ApplicationAttemptInfo(
          attemptId = internalAttemptInfo.attemptId,
          startTime = new Date(internalAttemptInfo.startTime),
          endTime = new Date(internalAttemptInfo.endTime),
          duration =
            if (internalAttemptInfo.endTime > 0) {
              internalAttemptInfo.endTime - internalAttemptInfo.startTime
            } else {
              0
            },
          lastUpdated = new Date(internalAttemptInfo.lastUpdated),
          sparkUser = internalAttemptInfo.sparkUser,
          completed = internalAttemptInfo.completed
        )
      }
    )
  }
} 
Example 48
Source File: OneJobResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class OneJobResource(ui: SparkUI) {

  @GET
  def oneJob(@PathParam("jobId") jobId: Int): JobData = {
    val statusToJobs: Seq[(JobExecutionStatus, Seq[JobUIData])] =
      AllJobsResource.getStatusToJobs(ui)
    val jobOpt = statusToJobs.flatMap(_._2).find { jobInfo => jobInfo.jobId == jobId}
    jobOpt.map { job =>
      AllJobsResource.convertJobData(job, ui.jobProgressListener, false)
    }.getOrElse {
      throw new NotFoundException("unknown job: " + jobId)
    }
  }

} 
Example 49
Source File: AllExecutorListResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      (0 until listener.activeStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      } ++ (0 until listener.deadStorageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = false)
      }
    }
  }
} 
Example 50
Source File: ExecutorListResource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import javax.ws.rs.{GET, PathParam, Produces}
import javax.ws.rs.core.MediaType

import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.exec.ExecutorsPage

@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class ExecutorListResource(ui: SparkUI) {

  @GET
  def executorList(): Seq[ExecutorSummary] = {
    val listener = ui.executorsListener
    listener.synchronized {
      // The following code should be protected by `listener` to make sure no executors are
      // removed before we query their status. See SPARK-12784.
      val storageStatusList = listener.activeStorageStatusList
      (0 until storageStatusList.size).map { statusId =>
        ExecutorsPage.getExecInfo(listener, statusId, isActive = true)
      }
    }
  }
} 
Example 51
Source File: JacksonMessageWriter.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.status.api.v1

import java.io.OutputStream
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.text.SimpleDateFormat
import java.util.{Calendar, SimpleTimeZone}
import javax.ws.rs.Produces
import javax.ws.rs.core.{MediaType, MultivaluedMap}
import javax.ws.rs.ext.{MessageBodyWriter, Provider}

import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.databind.{ObjectMapper, SerializationFeature}


@Provider
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class JacksonMessageWriter extends MessageBodyWriter[Object]{

  val mapper = new ObjectMapper() {
    override def writeValueAsString(t: Any): String = {
      super.writeValueAsString(t)
    }
  }
  mapper.registerModule(com.fasterxml.jackson.module.scala.DefaultScalaModule)
  mapper.enable(SerializationFeature.INDENT_OUTPUT)
  mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL)
  mapper.setDateFormat(JacksonMessageWriter.makeISODateFormat)

  override def isWriteable(
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Boolean = {
      true
  }

  override def writeTo(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType,
      multivaluedMap: MultivaluedMap[String, AnyRef],
      outputStream: OutputStream): Unit = {
    t match {
      case ErrorWrapper(err) => outputStream.write(err.getBytes("utf-8"))
      case _ => mapper.writeValue(outputStream, t)
    }
  }

  override def getSize(
      t: Object,
      aClass: Class[_],
      `type`: Type,
      annotations: Array[Annotation],
      mediaType: MediaType): Long = {
    -1L
  }
}

private[spark] object JacksonMessageWriter {
  def makeISODateFormat: SimpleDateFormat = {
    val iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'GMT'")
    val cal = Calendar.getInstance(new SimpleTimeZone(0, "GMT"))
    iso8601.setCalendar(cal)
    iso8601
  }
}