org.reactivestreams.Publisher Scala Examples

The following examples show how to use org.reactivestreams.Publisher in Scala. Each snippet comes from an open-source project; the source file, project, and license are listed above each example.
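Before looking at library-specific integrations, it helps to recall the Reactive Streams handshake that every Publisher must honour: a Subscriber receives a Subscription via onSubscribe, signals demand with request(n), and is then fed onNext/onComplete/onError. The following is a minimal, hand-rolled sketch of a single-element Publisher, written for illustration only; it is not taken from any of the projects below, which all rely on library-provided conversions instead.

import org.reactivestreams.{Publisher, Subscriber, Subscription}

// Illustrative only: emits exactly one element, then completes.
class SingleElementPublisher[T](value: T) extends Publisher[T] {
  override def subscribe(subscriber: Subscriber[_ >: T]): Unit =
    subscriber.onSubscribe(new Subscription {
      private var done = false
      override def request(n: Long): Unit =
        if (!done) {
          done = true
          if (n <= 0) subscriber.onError(new IllegalArgumentException("rule 3.9: non-positive request"))
          else { subscriber.onNext(value); subscriber.onComplete() }
        }
      override def cancel(): Unit = { done = true }
    })
}

Each example below essentially does one of two things with this interface: it exposes a richer streaming type (fs2 Stream, ZIO Stream, Akka Source, swave Spout, ...) as a Publisher, or it consumes an existing Publisher into such a type.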
Example 1
Source File: AsyncHttpClientFs2Backend.scala    From sttp   with Apache License 2.0
package sttp.client.asynchttpclient.fs2

import java.io.File
import java.nio.ByteBuffer

import cats.effect._
import cats.effect.implicits._
import cats.implicits._
import fs2.{Chunk, Stream}
import fs2.interop.reactivestreams._
import io.netty.buffer.{ByteBuf, Unpooled}
import org.asynchttpclient.{Request => _, Response => _, _}
import org.reactivestreams.Publisher
import sttp.client.asynchttpclient.{AsyncHttpClientBackend, WebSocketHandler}
import sttp.client.impl.cats.CatsMonadAsyncError
import sttp.client.internal._
import sttp.client.testing.SttpBackendStub
import sttp.client.ws.WebSocketResponse
import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions, _}

import scala.concurrent.ExecutionContext
import scala.language.higherKinds

class AsyncHttpClientFs2Backend[F[_]: ConcurrentEffect: ContextShift] private (
    asyncHttpClient: AsyncHttpClient,
    closeClient: Boolean,
    customizeRequest: BoundRequestBuilder => BoundRequestBuilder
) extends AsyncHttpClientBackend[F, Stream[F, Byte]](
      asyncHttpClient,
      new CatsMonadAsyncError,
      closeClient,
      customizeRequest
    ) {
  override def send[T](r: Request[T, Stream[F, Byte]]): F[Response[T]] = {
    super.send(r).guarantee(implicitly[ContextShift[F]].shift)
  }

  override def openWebsocket[T, WS_RESULT](
      r: Request[T, Stream[F, Byte]],
      handler: WebSocketHandler[WS_RESULT]
  ): F[WebSocketResponse[WS_RESULT]] = super.openWebsocket(r, handler).guarantee(ContextShift[F].shift)

  override protected def streamBodyToPublisher(s: Stream[F, Byte]): Publisher[ByteBuf] =
    s.chunks.map(c => Unpooled.wrappedBuffer(c.toArray)).toUnicastPublisher

  override protected def publisherToStreamBody(p: Publisher[ByteBuffer]): Stream[F, Byte] =
    p.toStream[F].flatMap(buf => Stream.chunk(Chunk.byteBuffer(buf)))

  override protected def publisherToBytes(p: Publisher[ByteBuffer]): F[Array[Byte]] = {
    p.toStream[F]
      .compile
      .fold(ByteBuffer.allocate(0))(concatByteBuffers)
      .map(_.array())
  }

  override protected def publisherToFile(p: Publisher[ByteBuffer], f: File): F[Unit] = {
    p.toStream[F]
      .flatMap(b => Stream.emits(b.array()))
      .through(fs2.io.file.writeAll(f.toPath, Blocker.liftExecutionContext(ExecutionContext.global)))
      .compile
      .drain
  }
}

object AsyncHttpClientFs2Backend {
  private def apply[F[_]: ConcurrentEffect: ContextShift](
      asyncHttpClient: AsyncHttpClient,
      closeClient: Boolean,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder
  ): SttpBackend[F, Stream[F, Byte], WebSocketHandler] =
    new FollowRedirectsBackend(new AsyncHttpClientFs2Backend(asyncHttpClient, closeClient, customizeRequest))

  def apply[F[_]: ConcurrentEffect: ContextShift](
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder = identity
  ): F[SttpBackend[F, Stream[F, Byte], WebSocketHandler]] =
    implicitly[Sync[F]]
      .delay(apply[F](AsyncHttpClientBackend.defaultClient(options), closeClient = true, customizeRequest))

  
  def stub[F[_]: Concurrent]: SttpBackendStub[F, Stream[F, ByteBuffer], WebSocketHandler] =
    SttpBackendStub(new CatsMonadAsyncError())
} 
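A rough usage sketch for the backend above (not part of the original file; it assumes sttp client v2 on cats-effect 2, and the URL is a placeholder):

import cats.effect.{ContextShift, IO}
import sttp.client._
import sttp.client.asynchttpclient.fs2.AsyncHttpClientFs2Backend
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val body: IO[Either[String, String]] =
  AsyncHttpClientFs2Backend[IO]().flatMap { implicit backend =>
    basicRequest
      .get(uri"https://example.org")   // placeholder URL
      .send()                          // IO[Response[Either[String, String]]]
      .map(_.body)
      .guarantee(backend.close())      // always release the underlying AsyncHttpClient
  }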
Example 2
Source File: UseCase.scala    From Fast-Data-Processing-Systems-with-SMACK-Stack   with MIT License
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.softwaremill.react.kafka.KafkaMessages._
import org.apache.kafka.common.serialization.{StringSerializer, StringDeserializer}
import com.softwaremill.react.kafka.{ProducerMessage, ConsumerProperties, ProducerProperties, ReactiveKafka}
import org.reactivestreams.{ Publisher, Subscriber }

implicit val actorSystem = ActorSystem("ReactiveKafka")
implicit val materializer = ActorMaterializer()

val kafka = new ReactiveKafka()
val publisher: Publisher[StringConsumerRecord] = kafka.consume(ConsumerProperties(
 bootstrapServers = "localhost:9092",
 topic = "lowercaseStrings",
 groupId = "groupName",
 valueDeserializer = new StringDeserializer()
))

val subscriber: Subscriber[StringProducerMessage] = kafka.publish(ProducerProperties(
  bootstrapServers = "localhost:9092",
  topic = "uppercaseStrings",
  valueSerializer = new StringSerializer()
))

Source.fromPublisher(publisher).map(m => ProducerMessage(m.value().toUpperCase))
  .to(Sink.fromSubscriber(subscriber)).run() 
Example 3
Source File: StreamToPublisherSpec.scala    From interop-reactive-streams   with Apache License 2.0
package zio.interop.reactivestreams

import java.lang.reflect.InvocationTargetException
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{ PublisherVerification, TestEnvironment }
import org.testng.annotations.Test
import zio.Task
import zio.UIO
import zio.ZIO
import zio.blocking._
import zio.stream.Stream
import zio.test._
import zio.test.Assertion._

object StreamToPublisherSpec extends DefaultRunnableSpec {
  override def spec =
    suite("Converting a `Stream` to a `Publisher`")(
      suite("passes all required and optional TCK tests")(tests: _*)
    )

  def makePV(runtime: zio.Runtime[Any]) =
    new PublisherVerification[Int](new TestEnvironment(2000, 500), 2000L) {

      def createPublisher(elements: Long): Publisher[Int] =
        runtime.unsafeRun(
          Stream
            .unfold(elements)(n => if (n > 0) Some((1, n - 1)) else None)
            .toPublisher
        )

      override def createFailedPublisher(): Publisher[Int] =
        runtime.unsafeRun(
          Stream
            .fail(new RuntimeException("boom!"))
            .map(_.asInstanceOf[Int])
            .toPublisher
        )
    }

  val tests =
    classOf[PublisherVerification[Int]]
      .getMethods()
      .toList
      .filter { method =>
        method
          .getAnnotations()
          .exists(annotation => classOf[Test].isAssignableFrom(annotation.annotationType()))
      }
      .collect {
        case method if method.getName().startsWith("untested") =>
          test(method.getName())(assert(())(anything)) @@ TestAspect.ignore
        case method =>
          testM(method.getName())(
            for {
              runtime <- ZIO.runtime[Any]
              pv      = makePV(runtime)
              _       <- UIO(pv.setUp())
              r <- blocking(Task(method.invoke(pv))).unit.mapError {
                    case e: InvocationTargetException => e.getTargetException()
                  }.run
            } yield assert(r)(succeeds(isUnit))
          )
      }
} 
Example 4
Source File: Converters.scala    From korolev   with Apache License 2.0
package korolev.akka

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev.akka.util.{KorolevStreamPublisher, KorolevStreamSubscriber}
import korolev.effect.{Effect, Stream}
import org.reactivestreams.Publisher

object Converters {

  implicit final class SinkCompanionOps(value: Sink.type) {
    def korolevStream[F[_]: Effect, T]: Sink[T, Stream[F, T]] = {
      val subscriber = new KorolevStreamSubscriber[F, T]()
      Sink
        .fromSubscriber(subscriber)
        .mapMaterializedValue(_ => subscriber)
    }
  }

  implicit final class StreamCompanionOps(value: Stream.type) {
    def fromPublisher[F[_]: Effect, T](publisher: Publisher[T]): Stream[F, T] = {
      val result = new KorolevStreamSubscriber[F, T]()
      publisher.subscribe(result)
      result
    }
  }

  implicit final class KorolevStreamsOps[F[_]: Effect, T](stream: Stream[F, T]) {

    
    def asPublisher(fanout: Boolean = false): Publisher[T] =
      new KorolevStreamPublisher(stream, fanout)

    def asAkkaSource: Source[T, NotUsed] = {
      val publisher = new KorolevStreamPublisher(stream, fanout = false)
      Source
        .fromPublisher(publisher)
        .buffer(10, OverflowStrategy.backpressure) // FIXME should work without this line. Looks like bug in akka-streams
    }
  }
} 
Example 5
Source File: PublisherSpoutStage.scala    From swave   with Mozilla Public License 2.0
package swave.core.impl.stages.spout

import org.reactivestreams.{Publisher, Subscriber, Subscription}
import swave.core.Stage
import swave.core.impl.Outport
import swave.core.impl.rs.RSCompliance
import swave.core.impl.stages.SpoutStage
import swave.core.macros.StageImplementation
import swave.core.util._

// format: OFF
@StageImplementation
private[core] final class PublisherSpoutStage(publisher: Publisher[AnyRef]) extends SpoutStage  { stage =>

  def kind = Stage.Kind.Spout.FromPublisher(publisher)

  connectOutAndSealWith { out ⇒
    region.impl.requestDispatcherAssignment()
    region.impl.registerForXStart(this)
    awaitingXStart(out)
  }

  def awaitingXStart(out: Outport) = state(
    xStart = () => {
      publisher.subscribe {
        new Subscriber[AnyRef] {
          def onSubscribe(s: Subscription) = {
            RSCompliance.verifyNonNull(s, "Subscription", "2.13")
            region.enqueueXEvent(stage, s)
          }
          def onNext(elem: AnyRef) = {
            RSCompliance.verifyNonNull(elem, "Element", "2.13")
            region.enqueueOnNext(stage, elem, stage)
          }
          def onComplete() = region.enqueueOnComplete(stage, stage)
          def onError(e: Throwable) = {
            RSCompliance.verifyNonNull(e, "Throwable", "2.13")
            region.enqueueOnError(stage, e, stage)
          }
        }
      }
      awaitingSubscription(out, 0L)
    })

  def awaitingSubscription(out: Outport, requested: Long): State = state(
    request = (n, _) ⇒ awaitingSubscription(out, requested ⊹ n),
    cancel = _ => awaitingSubscriptionDownstreamCancelled(),

    xEvent = {
      case s: Subscription =>
        if (requested > 0) s.request(requested)
        running(out, s)
    })

  def awaitingSubscriptionDownstreamCancelled(): State = state(
    request = (_, _) ⇒ stay(),
    cancel = _ => stay(),

    xEvent = {
      case s: Subscription =>
        s.cancel()
        stop()
    })

  def running(out: Outport, subscription: Subscription) = state(
    intercept = false,

    request = (n, _) ⇒ {
      subscription.request(n.toLong)
      stay()
    },

    cancel = _ => {
      subscription.cancel()
      stop()
    },

    onNext = onNextF(out),
    onComplete = stopCompleteF(out),
    onError = stopErrorF(out),

    xEvent = { case s: Subscription =>
      s.cancel()
      stay()
    })
} 
Example 6
Source File: Streamable.scala    From swave   with Mozilla Public License 2.0
package swave.core

import org.reactivestreams.Publisher
import scala.annotation.implicitNotFound
import scala.collection.immutable
import scala.concurrent.Future
import scala.util.Try
import swave.core.impl.util.RingBuffer
import swave.core.io.Bytes

@implicitNotFound(
  msg =
    "Don't know how to create a stream from instances of type ${T}. Maybe you'd like to provide an `implicit Streamable[${T}]`?")
//#source-quote
abstract class Streamable[-T] {
  type Out
  def apply(value: T): Spout[Out]
}
//#source-quote

object Streamable {
  type Aux[T, O] = Streamable[T] { type Out = O }

  private val spout =
    new Streamable[Spout[AnyRef]] {
      type Out = AnyRef
      def apply(value: Spout[AnyRef]) = value
    }
  implicit def forSpout[T]: Aux[Spout[T], T] = spout.asInstanceOf[Aux[Spout[T], T]]

  private val option =
    new Streamable[Option[AnyRef]] {
      type Out = AnyRef
      def apply(value: Option[AnyRef]) = Spout.fromOption(value)
    }
  implicit def forOption[T]: Aux[Option[T], T] = option.asInstanceOf[Aux[Option[T], T]]

  private val iterable =
    new Streamable[immutable.Iterable[AnyRef]] {
      type Out = AnyRef
      def apply(value: immutable.Iterable[AnyRef]) = Spout.fromIterable(value)
    }
  implicit def forIterable[T]: Aux[immutable.Iterable[T], T] =
    iterable.asInstanceOf[Aux[immutable.Iterable[T], T]]

  private val iterator =
    new Streamable[Iterator[AnyRef]] {
      type Out = AnyRef
      def apply(value: Iterator[AnyRef]) = Spout.fromIterator(value)
    }
  implicit def forIterator[T]: Aux[Iterator[T], T] = iterator.asInstanceOf[Aux[Iterator[T], T]]

  private val publisher =
    new Streamable[Publisher[AnyRef]] {
      type Out = AnyRef
      def apply(value: Publisher[AnyRef]) = Spout.fromPublisher(value)
    }
  implicit def forPublisher[T]: Aux[Publisher[T], T] =
    publisher.asInstanceOf[Aux[Publisher[T], T]]

  private val ringBuffer =
    new Streamable[RingBuffer[AnyRef]] {
      type Out = AnyRef
      def apply(value: RingBuffer[AnyRef]) = Spout.fromRingBuffer(value)
    }
  private[swave] implicit def forRingBuffer[T]: Aux[RingBuffer[T], T] =
    ringBuffer.asInstanceOf[Aux[RingBuffer[T], T]]

  private val future =
    new Streamable[Future[AnyRef]] {
      type Out = AnyRef
      def apply(value: Future[AnyRef]) = Spout.fromFuture(value)
    }
  implicit def forFuture[T]: Aux[Future[T], T] = future.asInstanceOf[Aux[Future[T], T]]

  private val tryy =
    new Streamable[Try[AnyRef]] {
      type Out = AnyRef
      def apply(value: Try[AnyRef]) = Spout.fromTry(value)
    }
  implicit def forTry[T]: Aux[Try[T], T] = tryy.asInstanceOf[Aux[Try[T], T]]

  implicit def forBytes[T](implicit ev: Bytes[T]): Aux[T, Byte] =
    new Streamable[T] {
      type Out = Byte
      def apply(value: T): Spout[Byte] = Spout.fromIterator(ev.toSeq(value).iterator)
    }

  implicit def lazyStreamable[T, O](implicit ev: Streamable.Aux[T, O]): Aux[() ⇒ T, O] =
    new Streamable[() ⇒ T] {
      type Out = O
      def apply(f: () ⇒ T) = ev(f())
    }
} 
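As a quick illustration of how such a type class is consumed (hypothetical helper, not part of the original file): any value with a Streamable instance can be turned into a Spout, and the element type is recovered through the Aux pattern.

import scala.concurrent.Future
import swave.core._

// Hypothetical helper: accept anything streamable and obtain a Spout from it.
def toSpout[T](value: T)(implicit s: Streamable[T]): Spout[s.Out] = s(value)

val fromOption: Spout[Int]    = toSpout(Option(42))                  // resolves Streamable.forOption
val fromFuture: Spout[String] = toSpout(Future.successful("hello"))  // resolves Streamable.forFuture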
Example 7
Source File: SwavePublisherVerification.scala    From swave   with Mozilla Public License 2.0
package swave.core.tck

import org.reactivestreams.Publisher
import org.reactivestreams.tck.{PublisherVerification, TestEnvironment}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.SkipException
import swave.core._

abstract class SwavePublisherVerification[T](val testEnv: TestEnvironment, publisherShutdownTimeout: Long)
    extends PublisherVerification[T](testEnv, publisherShutdownTimeout) with TestNGSuiteLike with StreamEnvShutdown {

  def this(printlnDebug: Boolean) =
    this(
      new TestEnvironment(Timeouts.defaultTimeout.toMillis, printlnDebug),
      Timeouts.publisherShutdownTimeout.toMillis)

  def this() = this(false)

  override def createFailedPublisher(): Publisher[T] =
    Spout.failing[T](new Exception("Nope")).drainTo(Drain.toPublisher()).get

  override def required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber(): Unit =
    throw new SkipException("Not relevant for publisher w/o fanout support")
} 
Example 8
Source File: SwaveIdentityProcessorVerification.scala    From swave   with Mozilla Public License 2.0
package swave.core.tck

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{IdentityProcessorVerification, TestEnvironment}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.SkipException
import org.testng.annotations.AfterClass
import swave.core._

abstract class SwaveIdentityProcessorVerification[T](val testEnv: TestEnvironment, publisherShutdownTimeout: Long)
    extends IdentityProcessorVerification[T](testEnv, publisherShutdownTimeout) with TestNGSuiteLike
    with StreamEnvShutdown {

  def this(printlnDebug: Boolean) =
    this(
      new TestEnvironment(Timeouts.defaultTimeout.toMillis, printlnDebug),
      Timeouts.publisherShutdownTimeout.toMillis)

  def this() = this(false)

  override def createFailedPublisher(): Publisher[T] =
    Spout.failing[T](new Exception("Nope")).drainTo(Drain.toPublisher()).get

  // Publishers created by swave don't support fanout by default
  override def maxSupportedSubscribers: Long = 1L

  override def required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber(): Unit =
    throw new SkipException("Not relevant for publisher w/o fanout support")

  override lazy val publisherExecutorService: ExecutorService =
    Executors.newFixedThreadPool(3)

  @AfterClass
  def shutdownPublisherExecutorService(): Unit = {
    publisherExecutorService.shutdown()
    publisherExecutorService.awaitTermination(3, TimeUnit.SECONDS)
  }
} 
Example 9
Source File: AsyncHttpClientFutureBackend.scala    From sttp   with Apache License 2.0
package sttp.client.asynchttpclient.future

import java.nio.ByteBuffer

import io.netty.buffer.ByteBuf
import org.asynchttpclient.{
  AsyncHttpClient,
  AsyncHttpClientConfig,
  BoundRequestBuilder,
  DefaultAsyncHttpClient,
  DefaultAsyncHttpClientConfig
}
import org.reactivestreams.Publisher
import sttp.client.asynchttpclient.{AsyncHttpClientBackend, WebSocketHandler}
import sttp.client.monad.FutureMonad
import sttp.client.testing.SttpBackendStub
import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions}

import scala.concurrent.{ExecutionContext, Future}

class AsyncHttpClientFutureBackend private (
    asyncHttpClient: AsyncHttpClient,
    closeClient: Boolean,
    customizeRequest: BoundRequestBuilder => BoundRequestBuilder
)(implicit
    ec: ExecutionContext
) extends AsyncHttpClientBackend[Future, Nothing](asyncHttpClient, new FutureMonad, closeClient, customizeRequest) {
  override protected def streamBodyToPublisher(s: Nothing): Publisher[ByteBuf] =
    s // `Nothing` has no values, so this method can never actually be invoked

  override protected def publisherToStreamBody(p: Publisher[ByteBuffer]): Nothing =
    throw new IllegalStateException("This backend does not support streaming")
}

object AsyncHttpClientFutureBackend {
  private def apply(
      asyncHttpClient: AsyncHttpClient,
      closeClient: Boolean,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder
  )(implicit
      ec: ExecutionContext
  ): SttpBackend[Future, Nothing, WebSocketHandler] =
    new FollowRedirectsBackend[Future, Nothing, WebSocketHandler](
      new AsyncHttpClientFutureBackend(asyncHttpClient, closeClient, customizeRequest)
    )

  
  def stub(implicit
      ec: ExecutionContext = ExecutionContext.global
  ): SttpBackendStub[Future, Nothing, WebSocketHandler] =
    SttpBackendStub(new FutureMonad())
} 
Example 10
Source File: SpringCloudGatewayHttpResponse.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.gateway.springcloud.http

import java.util.function.BiFunction

import com.webank.wedatasphere.linkis.common.conf.Configuration
import com.webank.wedatasphere.linkis.gateway.http.GatewayHttpResponse
import javax.servlet.http.Cookie
import org.reactivestreams.Publisher
import org.springframework.http.server.reactive.{AbstractServerHttpResponse, ServerHttpResponse}
import org.springframework.http.{HttpStatus, ResponseCookie}
import reactor.core.publisher.{Flux, Mono}
import reactor.ipc.netty.http.server.HttpServerResponse
import reactor.ipc.netty.http.websocket.{WebsocketInbound, WebsocketOutbound}


class SpringCloudGatewayHttpResponse(response: ServerHttpResponse) extends GatewayHttpResponse {

  private val cachedHTTPResponseMsg = new StringBuilder
  private val cachedWebSocketResponseMsg = new StringBuilder
  private val cachedRedirectUrlMsg = new StringBuilder
  private var responseMono: Mono[Void] = _

  override def addCookie(cookie: Cookie): Unit = {
    val responseCookie = ResponseCookie.from(cookie.getName, cookie.getValue)
    responseCookie.maxAge(cookie.getMaxAge)
    responseCookie.secure(cookie.getSecure)
    responseCookie.path(cookie.getPath)
    responseCookie.domain(cookie.getDomain)
    responseCookie.httpOnly(cookie.isHttpOnly)
    response.addCookie(responseCookie.build())
  }

  override def setHeader(key: String, value: String): Unit = response.getHeaders.add(key, value)

  override def setStatus(status: Int): Unit = response.setStatusCode(HttpStatus.valueOf(status))

  override def write(message: String): Unit = cachedHTTPResponseMsg.append(message)

  override def sendResponse(): Unit = if(responseMono == null) synchronized {
    if(responseMono != null) return
    if(cachedRedirectUrlMsg.nonEmpty) {
      if(response.getStatusCode == null || (response.getStatusCode != null && !response.getStatusCode.is3xxRedirection()))
        response.setStatusCode(HttpStatus.TEMPORARY_REDIRECT)
      response.getHeaders.set("Location", cachedRedirectUrlMsg.toString)
      responseMono = response.setComplete()
      return
    }
    setHeader("Content-Type", "application/json;charset=UTF-8")
    if(cachedHTTPResponseMsg.nonEmpty) {
      val dataBuffer = response.bufferFactory().wrap(cachedHTTPResponseMsg.toString.getBytes(Configuration.BDP_ENCODING.getValue))
      val messageFlux = Flux.just(Array(dataBuffer): _*)
      responseMono = response.writeWith(messageFlux)
    } else if(cachedWebSocketResponseMsg.nonEmpty) {
      response match {
        case abstractResponse: AbstractServerHttpResponse =>
          val nativeResponse = abstractResponse.getNativeResponse.asInstanceOf[HttpServerResponse]
          responseMono = nativeResponse.sendWebsocket(new BiFunction[WebsocketInbound, WebsocketOutbound, Publisher[Void]] {
            override def apply(in: WebsocketInbound, out: WebsocketOutbound): Publisher[Void] = {
              val dataBuffer = response.bufferFactory().wrap(cachedWebSocketResponseMsg.toString.getBytes(Configuration.BDP_ENCODING.getValue))
              SpringCloudHttpUtils.sendWebSocket(out, dataBuffer)
            }
          })
        case _ =>
      }
    }
  }

  override def isCommitted: Boolean = responseMono != null

  def getResponseMono: Mono[Void] = responseMono

  override def writeWebSocket(message: String): Unit = cachedWebSocketResponseMsg.append(message)

  override def redirectTo(url: String): Unit = cachedRedirectUrlMsg.append(url)
} 
Example 11
Source File: AsyncHttpClientScalazBackend.scala    From sttp   with Apache License 2.0
package sttp.client.asynchttpclient.scalaz

import java.nio.ByteBuffer

import io.netty.buffer.ByteBuf
import org.asynchttpclient.{
  AsyncHttpClient,
  AsyncHttpClientConfig,
  BoundRequestBuilder,
  DefaultAsyncHttpClient,
  DefaultAsyncHttpClientConfig
}
import org.reactivestreams.Publisher
import scalaz.concurrent.Task
import sttp.client.asynchttpclient.{AsyncHttpClientBackend, WebSocketHandler}
import sttp.client.impl.scalaz.TaskMonadAsyncError
import sttp.client.testing.SttpBackendStub
import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions}

class AsyncHttpClientScalazBackend private (
    asyncHttpClient: AsyncHttpClient,
    closeClient: Boolean,
    customizeRequest: BoundRequestBuilder => BoundRequestBuilder
) extends AsyncHttpClientBackend[Task, Nothing](asyncHttpClient, TaskMonadAsyncError, closeClient, customizeRequest) {
  override protected def streamBodyToPublisher(s: Nothing): Publisher[ByteBuf] =
    s // `Nothing` has no values, so this method can never actually be invoked

  override protected def publisherToStreamBody(p: Publisher[ByteBuffer]): Nothing =
    throw new IllegalStateException("This backend does not support streaming")
}

object AsyncHttpClientScalazBackend {
  private def apply(
      asyncHttpClient: AsyncHttpClient,
      closeClient: Boolean,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder
  ): SttpBackend[Task, Nothing, WebSocketHandler] =
    new FollowRedirectsBackend[Task, Nothing, WebSocketHandler](
      new AsyncHttpClientScalazBackend(asyncHttpClient, closeClient, customizeRequest)
    )

  def apply(
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder = identity
  ): Task[SttpBackend[Task, Nothing, WebSocketHandler]] =
    Task.delay(
      AsyncHttpClientScalazBackend(AsyncHttpClientBackend.defaultClient(options), closeClient = true, customizeRequest)
    )

  def usingConfig(
      cfg: AsyncHttpClientConfig,
      customizeRequest: BoundRequestBuilder => BoundRequestBuilder = identity
  ): Task[SttpBackend[Task, Nothing, WebSocketHandler]] =
    Task.delay(AsyncHttpClientScalazBackend(new DefaultAsyncHttpClient(cfg), closeClient = true, customizeRequest))

  
  def stub: SttpBackendStub[Task, Nothing, WebSocketHandler] = SttpBackendStub(TaskMonadAsyncError)
} 
Example 12
Source File: ArchiveSupport.scala    From akka-viz   with MIT License
package akkaviz.server

import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.marshalling.Marshalling.WithFixedContentType
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.{Flow, Source}
import akkaviz.config.Config
import akkaviz.persistence.{PersistenceSources, ReceivedRecord}
import akkaviz.rest
import com.datastax.driver.core.utils.UUIDs
import org.reactivestreams.Publisher

import scala.concurrent.ExecutionContext.Implicits.global

trait ArchiveSupport {

  def isArchiveEnabled: Boolean

  def receivedOf(ref: String): Source[ReceivedRecord, _]

  def receivedBetween(ref: String, ref2: String): Source[ReceivedRecord, _]

  def archiveRouting: Route = get {
    pathPrefix("messages") {
      if (isArchiveEnabled) {
        path("of" / Segment) {
          ref =>
            AkkaHttpHelpers.completeAsJson(receivedOf(ref).via(receivedRecordToRestReceived))
        } ~
          path("between" / Segment / Segment) {
            (ref, ref2) =>
              AkkaHttpHelpers.completeAsJson(receivedBetween(ref, ref2).via(receivedRecordToRestReceived))
          }
      } else {
        reject
      }
    }
  }

  private[this] implicit val receivedRecordMarshaller: Marshaller[rest.Received, String] = Marshaller.strict {
    received =>
      WithFixedContentType(MediaTypes.`application/json`, () => upickle.default.write(received))
  }

  private[this] def receivedRecordToRestReceived = Flow[ReceivedRecord].map {
    rr =>
      rest.Received(rr.millis, rr.direction, rr.first, rr.second, rr.data)
  }

} 
Example 13
Source File: PersistenceSources.scala    From akka-viz   with MIT License
package akkaviz.persistence

import akka.stream.scaladsl.Source
import io.getquill._
import io.getquill.naming.SnakeCase
import org.reactivestreams.Publisher
import monifu.concurrent.Implicits.globalScheduler

import scala.util.control.NonFatal

object PersistenceSources {

  private[this] lazy val db = source(new CassandraStreamSourceConfig[SnakeCase]("akkaviz.cassandra"))

  def of(ref: String): Source[ReceivedRecord, _] = {
    try {
      Source.fromPublisher(db.run(Queries.getAllFor)(ref))
    } catch {
      case NonFatal(e) =>
        Source.failed(e)
    }
  }

  def between(ref: String, ref2: String): Source[ReceivedRecord, _] = {
    try {
      Source.fromPublisher(db.run(Queries.getBetween)(ref, ref2, To))
    } catch {
      case NonFatal(e) =>
        Source.failed(e)
    }
  }

} 
Example 14
Source File: LocalGraph.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl.StreamLayout.Module
import akka.stream.impl.{PublisherSource, SubscriberSink}
import akka.stream.{SinkShape, SourceShape}
import org.apache.gearpump.akkastream.GearpumpMaterializer.Edge
import org.apache.gearpump.akkastream.materializer.LocalMaterializerImpl
import org.apache.gearpump.akkastream.module.{SinkBridgeModule, SourceBridgeModule}
import org.apache.gearpump.util.Graph
import org.reactivestreams.{Publisher, Subscriber}


    // Note: this excerpt omits its enclosing class definitions; only the
    // materialization and shutdown logic is shown.
    override def materialize(graph: SubGraph,
        matValues: scala.collection.mutable.Map[Module, Any]):
        scala.collection.mutable.Map[Module, Any] = {
      val newGraph: Graph[Module, Edge] = graph.graph.mapVertex {
        case source: SourceBridgeModule[in, out] =>
          val subscriber = matValues(source).asInstanceOf[Subscriber[in]]
          val shape: SinkShape[in] = SinkShape(source.inPort)
          new SubscriberSink(subscriber, DefaultAttributes.subscriberSink, shape)
        case sink: SinkBridgeModule[in, out] =>
          val publisher = matValues(sink).asInstanceOf[Publisher[out]]
          val shape: SourceShape[out] = SourceShape(sink.outPort)
          new PublisherSource(publisher, DefaultAttributes.publisherSource, shape)
        case other =>
          other
      }
      materializer.materialize(newGraph, matValues)
    }

    override def shutdown: Unit = {
      materializer.shutdown()
    }
  }
} 
Example 15
Source File: SinkBridgeTask.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util
import java.util.concurrent.TimeUnit

import akka.actor.Actor.Receive
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout
import org.apache.gearpump.Message
import org.apache.gearpump.akkastream.task.SinkBridgeTask.RequestMessage
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.streaming.ProcessorId
import org.apache.gearpump.streaming.appmaster.AppMaster.{LookupTaskActorRef, TaskActorRef}
import org.apache.gearpump.streaming.task.{Task, TaskContext, TaskId}
import org.apache.gearpump.util.LogUtil
import org.reactivestreams.{Publisher, Subscriber, Subscription}


class SinkBridgeTask(taskContext : TaskContext, userConf : UserConfig)
  extends Task(taskContext, userConf) {
  import taskContext.taskId

  val queue = new util.LinkedList[Message]()
  var subscriber: ActorRef = _

  var request: Int = 0

  override def onStart(startTime : Instant) : Unit = {}

  override def onNext(msg: Message) : Unit = {
    queue.add(msg)
    trySendingData()
  }

  override def onStop() : Unit = {}

  private def trySendingData(): Unit = {
    if (subscriber != null) {
      (0 until request).map(_ => queue.poll()).filter(_ != null).foreach { msg =>
        subscriber ! msg.value
        request -= 1
      }
    }
  }

  override def receiveUnManagedMessage: Receive = {
    case RequestMessage(n) =>
      this.subscriber = sender
      LOG.info("the downstream has requested " + n + " messages from " + subscriber)
      request += n.toInt
      trySendingData()
    case msg =>
      LOG.error("Failed! Received unknown message " + "taskId: " + taskId + ", " + msg.toString)
  }
}

object SinkBridgeTask {

  case class RequestMessage(number: Int)

  class SinkBridgeTaskClient(system: ActorSystem, context: ClientContext, appId: Int,
      processorId: ProcessorId) extends Publisher[AnyRef] with Subscription {
    private val taskId = TaskId(processorId, index = 0)
    private val LOG = LogUtil.getLogger(getClass)

    private var actor: ActorRef = _
    import system.dispatcher

    private val task =
      context.askAppMaster[TaskActorRef](appId, LookupTaskActorRef(taskId)).map{container =>
      // println("Successfully resolved taskRef for taskId " + taskId + ", " + container.task)
      container.task
    }

    override def subscribe(subscriber: Subscriber[_ >: AnyRef]): Unit = {
      this.actor = system.actorOf(Props(new ClientActor(subscriber)))
      subscriber.onSubscribe(this)
    }

    override def cancel(): Unit = ()

    private implicit val timeout = Timeout(5, TimeUnit.SECONDS)

    override def request(l: Long): Unit = {
      task.foreach{ task =>
        task.tell(RequestMessage(l.toInt), actor)
      }
    }
  }

  class ClientActor(subscriber: Subscriber[_ >: AnyRef]) extends Actor {
    def receive: Receive = {
      case result: AnyRef =>
        subscriber.onNext(result)
    }
  }
} 
Example 16
Source File: DummyModule.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.module

import akka.stream.impl.StreamLayout.{AtomicModule, Module}
import akka.stream.impl.{SinkModule, SourceModule}
import akka.stream.{Attributes, MaterializationContext, SinkShape, SourceShape}
import org.reactivestreams.{Publisher, Subscriber}


// Note: only DummySink is shown here; the DummyModule trait it mixes in is not included in this excerpt.
class DummySink[IN](val attributes: Attributes, shape: SinkShape[IN])
  extends SinkModule[IN, Unit](shape) with DummyModule {
  override def create(context: MaterializationContext): (Subscriber[IN], Unit) = {
    throw new UnsupportedOperationException()
  }

  override protected def newInstance(shape: SinkShape[IN]): SinkModule[IN, Unit] = {
    new DummySink[IN](attributes, shape)
  }

  override def withAttributes(attr: Attributes): Module = {
    new DummySink[IN](attr, amendShape(attr))
  }
} 
Example 17
Source File: Streamed.scala    From play-ws   with Apache License 2.0
package play.api.libs.ws.ahc

import java.net.URI

import org.reactivestreams.Subscriber
import org.reactivestreams.Subscription
import org.reactivestreams.Publisher
import play.shaded.ahc.io.netty.handler.codec.http.HttpHeaders
import akka.Done
import play.shaded.ahc.org.asynchttpclient.AsyncHandler.State
import play.shaded.ahc.org.asynchttpclient._
import play.shaded.ahc.org.asynchttpclient.handler.StreamedAsyncHandler

import scala.concurrent.Promise

case class StreamedState(
    statusCode: Int = -1,
    statusText: String = "",
    uriOption: Option[URI] = None,
    responseHeaders: Map[String, scala.collection.Seq[String]] = Map.empty,
    publisher: Publisher[HttpResponseBodyPart] = EmptyPublisher
)

class DefaultStreamedAsyncHandler[T](
    f: java.util.function.Function[StreamedState, T],
    streamStarted: Promise[T],
    streamDone: Promise[Done]
) extends StreamedAsyncHandler[Unit]
    with AhcUtilities {
  private var state = StreamedState()

  def onStream(publisher: Publisher[HttpResponseBodyPart]): State = {
    if (this.state.publisher != EmptyPublisher) State.ABORT
    else {
      this.state = state.copy(publisher = publisher)
      streamStarted.success(f(state))
      State.CONTINUE
    }
  }

  override def onStatusReceived(status: HttpResponseStatus): State = {
    if (this.state.publisher != EmptyPublisher) State.ABORT
    else {
      state = state.copy(
        statusCode = status.getStatusCode,
        statusText = status.getStatusText,
        uriOption = Option(status.getUri.toJavaNetURI)
      )
      State.CONTINUE
    }
  }

  override def onHeadersReceived(h: HttpHeaders): State = {
    if (this.state.publisher != EmptyPublisher) State.ABORT
    else {
      state = state.copy(responseHeaders = headersToMap(h))
      State.CONTINUE
    }
  }

  override def onBodyPartReceived(bodyPart: HttpResponseBodyPart): State =
    throw new IllegalStateException("Should not have received bodypart")

  override def onCompleted(): Unit = {
    // EmptyPublisher can be replaced with `Source.empty` when we carry out the refactoring
    // mentioned in the `execute2` method.
    streamStarted.trySuccess(f(state.copy(publisher = EmptyPublisher)))
    streamDone.trySuccess(Done)
  }

  override def onThrowable(t: Throwable): Unit = {
    streamStarted.tryFailure(t)
    streamDone.tryFailure(t)
  }
}

private case object EmptyPublisher extends Publisher[HttpResponseBodyPart] {
  def subscribe(s: Subscriber[_ >: HttpResponseBodyPart]): Unit = {
    if (s eq null)
      throw new NullPointerException("Subscriber must not be null, rule 1.9")
    s.onSubscribe(CancelledSubscription)
    s.onComplete()
  }
  private case object CancelledSubscription extends Subscription {
    override def request(elements: Long): Unit = ()
    override def cancel(): Unit                = ()
  }
} 
Example 18
Source File: Ctx.scala    From sangria-subscriptions-example   with Apache License 2.0
import akka.NotUsed
import akka.util.Timeout
import schema.MutationError
import akka.actor.ActorRef
import generic.Event
import generic.MemoryEventStore._
import generic.View.{Get, GetMany}
import akka.pattern.ask
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source
import org.reactivestreams.Publisher

import scala.concurrent.{ExecutionContext, Future}

case class Ctx(
  authors: ActorRef,
  articles: ActorRef,
  eventStore: ActorRef,
  eventStorePublisher: Publisher[Event],
  ec: ExecutionContext,
  to: Timeout
) extends Mutation {
  implicit def executionContext = ec
  implicit def timeout = to

  lazy val eventStream: Source[Event, NotUsed] =
    Source.fromPublisher(eventStorePublisher).buffer(100, OverflowStrategy.fail)

  def addEvent[T](view: ActorRef, event: Event) =
    (eventStore ? AddEvent(event)).flatMap {
      case EventAdded(_) ⇒
        (view ? Get(event.id, Some(event.version))).mapTo[Option[T]]
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def addDeleteEvent(event: Event) =
    (eventStore ? AddEvent(event)).map {
      case EventAdded(e) ⇒  e
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def loadLatestVersion(id: String, version: Long): Future[Long] =
    (eventStore ? LatestEventVersion(id)) map {
      case Some(latestVersion: Long) if version != latestVersion ⇒
        throw MutationError(s"Concurrent Modification error for entity '$id'. Latest entity version is '$latestVersion'.")
      case Some(version: Long) ⇒
        version + 1
      case _ ⇒
        throw MutationError(s"Entity with ID '$id' does not exist.")
    }

  def loadAuthors(ids: Seq[String]) =
    (authors ? GetMany(ids)).mapTo[Seq[Author]]
}