cats.effect.ContextShift Scala Examples

The following examples show how to use cats.effect.ContextShift. They are taken from open-source projects; the source file, project, and license are noted above each example.
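As a quick orientation before the project examples: the sketch below (cats-effect 2.x; the pool names are illustrative and not taken from any project on this page) shows the two things a ContextShift[IO] is for. evalOn runs an effect on a dedicated execution context and shifts back to the default one afterwards, and IO.shift inserts a fairness boundary by re-submitting the continuation to the default context.

import java.util.concurrent.Executors

import cats.effect.{ContextShift, IO}

import scala.concurrent.ExecutionContext

object ContextShiftSketch extends App {
  // ContextShift[IO] backed by the default (CPU-bound) pool
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // a separate pool for blocking work (illustrative name)
  private val blockingEc: ExecutionContext =
    ExecutionContext.fromExecutor(Executors.newCachedThreadPool())

  val program: IO[String] = for {
    _ <- IO(println(s"default pool: ${Thread.currentThread.getName}"))
    // run on blockingEc, then automatically shift back to the default pool
    r <- cs.evalOn(blockingEc)(IO {
           println(s"blocking pool: ${Thread.currentThread.getName}")
           "done"
         })
    _ <- IO.shift // re-schedule the rest of the program on the default pool
  } yield r

  println(program.unsafeRunSync())
}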
Example 1
Source File: FinatraServerCatsTests.scala (from tapir, Apache License 2.0)
package sttp.tapir.server.finatra.cats

import cats.data.NonEmptyList
import cats.effect.{ContextShift, IO, Resource, Timer}
import sttp.tapir.Endpoint
import sttp.tapir.server.finatra.{FinatraRoute, FinatraServerOptions, FinatraServerTests}
import sttp.tapir.server.tests.ServerTests
import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint}
import sttp.tapir.tests.{Port, PortCounter}

import scala.concurrent.ExecutionContext
import scala.reflect.ClassTag

class FinatraServerCatsTests extends ServerTests[IO, Nothing, FinatraRoute] {
  override def streamingSupport: Boolean = false

  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
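  // derive the ContextShift[IO] and Timer[IO] instances from the execution context above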
  implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)
  implicit val timer: Timer[IO] = IO.timer(ec)

  override def pureResult[T](t: T): IO[T] = IO.pure(t)
  override def suspendResult[T](t: => T): IO[T] = IO.apply(t)

  override def route[I, E, O](
      e: ServerEndpoint[I, E, O, Nothing, IO],
      decodeFailureHandler: Option[DecodeFailureHandler] = None
  ): FinatraRoute = {
    implicit val serverOptions: FinatraServerOptions =
      FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler))
    e.toRoute
  }

  override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => IO[O])(implicit
      eClassTag: ClassTag[E]
  ): FinatraRoute = e.toRouteRecoverErrors(fn)

  override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port)

  override lazy val portCounter: PortCounter = new PortCounter(59000)
} 
Example 2
Source File: ExperimentVariantEventPostgresqlServiceTest.scala (from izanami, Apache License 2.0)
package specs.postgresql.abtesting

import cats.effect.{ContextShift, IO}
import domains.abtesting.events.impl.ExperimentVariantEventPostgresqlService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{DbDomainConfig, DbDomainConfigDetails, PostgresqlConfig}
import libs.logs.ZLogger
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import store.postgresql.PostgresqlClient
import zio.{Exit, Reservation}

class ExperimentVariantEventPostgresqlServiceTest
    extends AbstractExperimentServiceTest("Postgresql")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global)
  import zio.interop.catz._

  private val pgConfig = PostgresqlConfig(
    "org.postgresql.Driver",
    "jdbc:postgresql://localhost:5555/izanami",
    "izanami",
    "izanami",
    32,
    None
  )

  val rPgClient: Reservation[ZLogger, Throwable, Option[PostgresqlClient]] = runtime.unsafeRun(
    PostgresqlClient
      .postgresqlClient(
        system,
        Some(pgConfig)
      )
      .reserve
      .provideLayer(ZLogger.live)
  )

  private val client: Option[PostgresqlClient] = runtime.unsafeRun(rPgClient.acquire.provideLayer(ZLogger.live))

  override def dataStore(name: String): ExperimentVariantEventService.Service = ExperimentVariantEventPostgresqlService(
    client.get,
    DbDomainConfig(env.Postgresql, DbDomainConfigDetails(name, None), None)
  )

  override protected def afterAll(): Unit = {
    super.afterAll()
    runtime.unsafeRun(rPgClient.release(Exit.unit).provideLayer(ZLogger.live))
  }
} 
Example 3
Source File: CorrelationIdMiddlewareTest.scala (from scala-server-toolkit, MIT License)
package com.avast.sst.http4s.server.middleware

import java.net.InetSocketAddress

import cats.effect.{ContextShift, IO, Resource, Timer}
import com.avast.sst.http4s.server.Http4sRouting
import org.http4s.client.blaze.BlazeClientBuilder
import org.http4s.dsl.Http4sDsl
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.util.CaseInsensitiveString
import org.http4s.{Header, HttpRoutes, Request, Uri}
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

@SuppressWarnings(Array("scalafix:Disable.get", "scalafix:Disable.toString", "scalafix:Disable.createUnresolved"))
class CorrelationIdMiddlewareTest extends AsyncFunSuite with Http4sDsl[IO] {

  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("CorrelationIdMiddleware fills Request attributes and HTTP response header") {
    val test = for {
      middleware <- Resource.liftF(CorrelationIdMiddleware.default[IO])
      routes = Http4sRouting.make {
        middleware.wrap {
          HttpRoutes.of[IO] {
            case req @ GET -> Root / "test" =>
              val id = middleware.retrieveCorrelationId(req)
              Ok("test").map(_.withHeaders(Header("Attribute-Value", id.toString)))
          }
        }
      }
      server <- BlazeServerBuilder[IO](ExecutionContext.global)
        .bindSocketAddress(InetSocketAddress.createUnresolved("127.0.0.1", 0))
        .withHttpApp(routes)
        .resource
      client <- BlazeClientBuilder[IO](ExecutionContext.global).resource
    } yield (server, client)

    test
      .use {
        case (server, client) =>
          client
            .run(
              Request[IO](uri = Uri.unsafeFromString(s"http://${server.address.getHostString}:${server.address.getPort}/test"))
                .withHeaders(Header("Correlation-Id", "test-value"))
            )
            .use { response =>
              IO.delay {
                assert(response.headers.get(CaseInsensitiveString("Correlation-Id")).get.value === "test-value")
                assert(response.headers.get(CaseInsensitiveString("Attribute-Value")).get.value === "Some(CorrelationId(test-value))")
              }
            }
      }
      .unsafeToFuture()
  }

} 
Example 4
Source File: Fs2KafkaModule.scala (from scala-server-toolkit, MIT License)
package com.avast.sst.fs2kafka

import cats.effect.{Blocker, ConcurrentEffect, ContextShift, Resource, Timer}
import fs2.kafka._

object Fs2KafkaModule {

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K: Deserializer[F, *], V: Deserializer[F, *]](
      config: ConsumerConfig,
      blocker: Option[Blocker] = None,
      createConsumer: Option[Map[String, String] => F[KafkaByteConsumer]] = None
  ): Resource[F, KafkaConsumer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ConsumerSettings[F, K, V], A) => ConsumerSettings[F, K, V]
    )(initial: ConsumerSettings[F, K, V]): ConsumerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ConsumerSettings(implicitly[Deserializer[F, K]], implicitly[Deserializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .withGroupId(config.groupId)
      .pipe(setOpt(config.groupInstanceId)(_.withGroupInstanceId(_)))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .pipe(setOpt(config.clientRack)(_.withClientRack(_)))
      .withAutoOffsetReset(config.autoOffsetReset)
      .withEnableAutoCommit(config.enableAutoCommit)
      .withAutoCommitInterval(config.autoCommitInterval)
      .withAllowAutoCreateTopics(config.allowAutoCreateTopics)
      .withCloseTimeout(config.closeTimeout)
      .withCommitRecovery(config.commitRecovery)
      .withCommitTimeout(config.closeTimeout)
      .withDefaultApiTimeout(config.defaultApiTimeout)
      .withHeartbeatInterval(config.heartbeatInterval)
      .withIsolationLevel(config.isolationLevel)
      .withMaxPrefetchBatches(config.maxPrefetchBatches)
      .withPollInterval(config.pollInterval)
      .withPollTimeout(config.pollTimeout)
      .withMaxPollInterval(config.maxPollInterval)
      .withMaxPollRecords(config.maxPollRecords)
      .withRequestTimeout(config.requestTimeout)
      .withSessionTimeout(config.sessionTimeout)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createConsumer)(_.withCreateConsumer(_)))

    makeConsumer(settings)
  }

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K, V](
      settings: ConsumerSettings[F, K, V]
  ): Resource[F, KafkaConsumer[F, K, V]] = consumerResource[F].using(settings)

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K: Serializer[F, *], V: Serializer[F, *]](
      config: ProducerConfig,
      blocker: Option[Blocker] = None,
      createProducer: Option[Map[String, String] => F[KafkaByteProducer]] = None
  ): Resource[F, KafkaProducer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ProducerSettings[F, K, V], A) => ProducerSettings[F, K, V]
    )(initial: ProducerSettings[F, K, V]): ProducerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ProducerSettings(implicitly[Serializer[F, K]], implicitly[Serializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .withAcks(config.acks)
      .withBatchSize(config.batchSize)
      .withCloseTimeout(config.closeTimeout)
      .withDeliveryTimeout(config.deliveryTimeout)
      .withRequestTimeout(config.requestTimeout)
      .withLinger(config.linger)
      .withEnableIdempotence(config.enableIdempotence)
      .withMaxInFlightRequestsPerConnection(config.maxInFlightRequestsPerConnection)
      .withParallelism(config.parallelism)
      .withRetries(config.retries)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createProducer)(_.withCreateProducer(_)))

    makeProducer(settings)
  }

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K, V](settings: ProducerSettings[F, K, V]): Resource[F, KafkaProducer[F, K, V]] =
    producerResource[F].using(settings)

  
  implicit private final class ChainingOps[A](private val self: A) extends AnyVal {
    def pipe[B](f: A => B): B = f(self)
  }

} 
Example 5
Source File: DoobieHikariModule.scala (from scala-server-toolkit, MIT License)
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  
  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()
      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)
      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)

      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)
      c
    }
  }

} 
Example 6
Source File: Http4sBlazeServerModuleTest.scala (from scala-server-toolkit, MIT License)
package com.avast.sst.http4s.server

import cats.effect.{ContextShift, IO, Timer}
import com.avast.sst.http4s.client.{Http4sBlazeClientConfig, Http4sBlazeClientModule}
import org.http4s.HttpRoutes
import org.http4s.dsl.Http4sDsl
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

class Http4sBlazeServerModuleTest extends AsyncFunSuite with Http4sDsl[IO] {

  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("Simple HTTP server") {
    val routes = Http4sRouting.make(HttpRoutes.of[IO] {
      case GET -> Root / "test" => Ok("test")
    })
    val test = for {
      server <- Http4sBlazeServerModule.make[IO](Http4sBlazeServerConfig("127.0.0.1", 0), routes, ExecutionContext.global)
      client <- Http4sBlazeClientModule.make[IO](Http4sBlazeClientConfig(), ExecutionContext.global)
    } yield (server, client)

    test
      .use {
        case (server, client) =>
          client
            .expect[String](s"http://${server.address.getHostString}:${server.address.getPort}/test")
            .map(response => assert(response === "test"))
      }
      .unsafeToFuture()
  }

} 
Example 7
Source File: MySqlInvoiceList.scala (from event-sourcing-kafka-streams, MIT License)
package org.amitayh.invoices.dao

import cats.Monad
import cats.effect.{Async, ContextShift, Resource}
import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.hikari.HikariTransactor
import doobie.implicits._
import doobie.util.ExecutionContexts
import doobie.util.transactor.Transactor

class MySqlInvoiceList[F[_]: Monad](transactor: Transactor[F]) extends InvoiceList[F] {
  override def save(record: InvoiceRecord): F[Unit] =
    MySqlInvoiceList.save(record).transact(transactor)

  override def get: F[List[InvoiceRecord]] =
    MySqlInvoiceList.get.transact(transactor)
}

object MySqlInvoiceList {
  def save(record: InvoiceRecord): ConnectionIO[Unit] = {
    import record._
    val sql = sql"""
      INSERT INTO invoices (id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status)
      VALUES ($id, $version, $updatedAt, $customerName, $customerEmail, $issueDate, $dueDate, $total, $status)
      ON DUPLICATE KEY UPDATE
        version = VALUES(version),
        updated_at = VALUES(updated_at),
        customer_name = VALUES(customer_name),
        customer_email = VALUES(customer_email),
        issue_date = VALUES(issue_date),
        due_date = VALUES(due_date),
        total = VALUES(total),
        status = VALUES(status)
    """
    sql.update.run.void
  }

  def get: ConnectionIO[List[InvoiceRecord]] = {
    val sql = sql"""
      SELECT id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status
      FROM invoices
      WHERE status IN ('New', 'Paid')
      ORDER BY created_at DESC
    """
    sql.query[InvoiceRecord].to[List]
  }

  def resource[F[_]: Async: ContextShift]: Resource[F, MySqlInvoiceList[F]] = for {
    connectEC <- ExecutionContexts.fixedThreadPool[F](32)
    transactEC <- ExecutionContexts.cachedThreadPool[F]
    transactor <- HikariTransactor.newHikariTransactor[F](
      driverClassName = sys.env("DB_DRIVER"),
      url = sys.env("DB_URL"),
      user = sys.env("DB_USER"),
      pass = sys.env("DB_PASS"),
      connectEC = connectEC,
      transactEC = transactEC)
  } yield new MySqlInvoiceList[F](transactor)
} 
Example 8
Source File: ListProjector.scala (from event-sourcing-kafka-streams, MIT License)
package org.amitayh.invoices.projector

import java.util.UUID

import cats.effect.concurrent.Deferred
import cats.effect.{ContextShift, IO}
import cats.syntax.apply._
import org.amitayh.invoices.common.Config
import org.amitayh.invoices.common.domain.InvoiceSnapshot
import org.amitayh.invoices.common.serde.AvroSerde.SnapshotSerde
import org.amitayh.invoices.common.serde.UuidSerde
import org.amitayh.invoices.dao.{InvoiceList, InvoiceRecord, MySqlInvoiceList}
import org.amitayh.invoices.streamprocessor.StreamProcessorApp
import org.apache.kafka.streams.kstream.{Consumed, ForeachAction, KeyValueMapper}
import org.apache.kafka.streams.{KeyValue, StreamsBuilder, Topology}

import scala.concurrent.ExecutionContext.global

object ListProjector extends StreamProcessorApp {

  override def appId: String = "invoices.processor.list-projector"

  override def topology: Topology = ListProjectorTopology.create.unsafeRunSync()

}

object ListProjectorTopology {
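  // ContextShift[IO] is required by .start below, which builds the topology on a background fiber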
  implicit val contextShift: ContextShift[IO] = IO.contextShift(global)

  def create: IO[Topology] = for {
    deferred <- Deferred[IO, Topology]
    _ <- MySqlInvoiceList.resource[IO].use { invoiceList =>
      buildTopology(invoiceList).flatMap(deferred.complete) *> IO.never
    }.start
    topology <- deferred.get
  } yield topology

  private def buildTopology(invoiceList: InvoiceList[IO]): IO[Topology] = IO {
    val builder = new StreamsBuilder

    val snapshots = builder.stream(
      Config.Topics.Snapshots.name,
      Consumed.`with`(UuidSerde, SnapshotSerde))

    snapshots
      .map[UUID, InvoiceRecord](ToRecord)
      .foreach(new SaveInvoiceRecord(invoiceList))

    builder.build()
  }
}

object ToRecord extends KeyValueMapper[UUID, InvoiceSnapshot, KeyValue[UUID, InvoiceRecord]] {
  override def apply(id: UUID, snapshot: InvoiceSnapshot): KeyValue[UUID, InvoiceRecord] =
    KeyValue.pair(id, InvoiceRecord(id, snapshot))
}

class SaveInvoiceRecord(invoicesList: InvoiceList[IO])
  extends ForeachAction[UUID, InvoiceRecord] {

  override def apply(id: UUID, value: InvoiceRecord): Unit =
    invoicesList.save(value).unsafeRunSync()

} 
Example 9
Source File: Statics.scala (from event-sourcing-kafka-streams, MIT License)
package org.amitayh.invoices.web

import cats.effect.{ContextShift, Sync}
import org.http4s.dsl.Http4sDsl
import org.http4s.{HttpRoutes, StaticFile}

import scala.concurrent.ExecutionContext.global

class Statics[F[_]: Sync: ContextShift] extends Http4sDsl[F] {

  val service: HttpRoutes[F] = HttpRoutes.of[F] {
    case request @ GET -> fileName =>
      StaticFile
        .fromResource(
          name = s"/statics$fileName",
          blockingExecutionContext = global,
          req = Some(request),
          preferGzipped = true)
        .getOrElseF(NotFound())
  }

}

object Statics {
  def apply[F[_]: Sync: ContextShift]: Statics[F] = new Statics[F]
} 
Example 10
Source File: Http4sServerOptions.scala (from tapir, Apache License 2.0)
package sttp.tapir.server.http4s

import java.io.File

import cats.Applicative
import cats.effect.{ContextShift, Sync}
import org.http4s.Request
import sttp.tapir.Defaults
import sttp.tapir.server.{DecodeFailureHandler, LogRequestHandling, ServerDefaults}

import scala.concurrent.ExecutionContext

case class Http4sServerOptions[F[_]](
    createFile: (ExecutionContext, Request[F]) => F[File], // TODO: include request/part headers, information if this is a part?
    blockingExecutionContext: ExecutionContext,
    ioChunkSize: Int,
    decodeFailureHandler: DecodeFailureHandler,
    logRequestHandling: LogRequestHandling[F[Unit]]
)

object Http4sServerOptions {
  implicit def default[F[_]: Sync: ContextShift]: Http4sServerOptions[F] =
    Http4sServerOptions(
      defaultCreateFile,
      ExecutionContext.Implicits.global,
      8192,
      ServerDefaults.decodeFailureHandler,
      defaultLogRequestHandling[F]
    )

  // create the temp file on the supplied blocking execution context, shifting back afterwards
  def defaultCreateFile[F[_]](implicit sync: Sync[F], cs: ContextShift[F]): (ExecutionContext, Request[F]) => F[File] =
    (ec, _) => cs.evalOn(ec)(sync.delay(Defaults.createTempFile()))

  def defaultLogRequestHandling[F[_]: Sync]: LogRequestHandling[F[Unit]] =
    LogRequestHandling[F[Unit]](
      doLogWhenHandled = debugLog[F],
      doLogAllDecodeFailures = debugLog[F],
      doLogLogicExceptions = (msg: String, ex: Throwable) => Sync[F].delay(EndpointToHttp4sServer.log.error(ex)(msg)),
      noLog = Applicative[F].unit
    )

  private def debugLog[F[_]: Sync](msg: String, exOpt: Option[Throwable]): F[Unit] =
    exOpt match {
      case None     => Sync[F].delay(EndpointToHttp4sServer.log.debug(msg))
      case Some(ex) => Sync[F].delay(EndpointToHttp4sServer.log.debug(ex)(msg))
    }
} 
Example 11
Source File: TapirHttp4sServer.scala (from tapir, Apache License 2.0)
package sttp.tapir.server.http4s

import cats.Monad
import cats.effect.{ContextShift, Sync}
import cats.implicits._
import org.http4s.{EntityBody, HttpRoutes}
import sttp.tapir.Endpoint
import sttp.tapir.server.ServerEndpoint
import sttp.tapir.typelevel.ReplaceFirstInTuple

import scala.reflect.ClassTag

trait TapirHttp4sServer {
  implicit class RichHttp4sHttpEndpoint[I, E, O, F[_]](e: Endpoint[I, E, O, EntityBody[F]]) {
    def toRoutes(
        logic: I => F[Either[E, O]]
    )(implicit serverOptions: Http4sServerOptions[F], fs: Sync[F], fcs: ContextShift[F]): HttpRoutes[F] = {
      new EndpointToHttp4sServer(serverOptions).toRoutes(e.serverLogic(logic))
    }

    def toRouteRecoverErrors(logic: I => F[O])(implicit
        serverOptions: Http4sServerOptions[F],
        fs: Sync[F],
        fcs: ContextShift[F],
        eIsThrowable: E <:< Throwable,
        eClassTag: ClassTag[E]
    ): HttpRoutes[F] = {
      new EndpointToHttp4sServer(serverOptions).toRoutes(e.serverLogicRecoverErrors(logic))
    }
  }

  implicit class RichHttp4sServerEndpoint[I, E, O, F[_]](se: ServerEndpoint[I, E, O, EntityBody[F], F]) {
    def toRoutes(implicit serverOptions: Http4sServerOptions[F], fs: Sync[F], fcs: ContextShift[F]): HttpRoutes[F] =
      new EndpointToHttp4sServer(serverOptions).toRoutes(se)
  }

  implicit class RichHttp4sServerEndpoints[F[_]](serverEndpoints: List[ServerEndpoint[_, _, _, EntityBody[F], F]]) {
    def toRoutes(implicit serverOptions: Http4sServerOptions[F], fs: Sync[F], fcs: ContextShift[F]): HttpRoutes[F] = {
      new EndpointToHttp4sServer(serverOptions).toRoutes(serverEndpoints)
    }
  }

  implicit class RichToMonadFunction[T, U, F[_]: Monad](a: T => F[U]) {
    @deprecated
    def andThenFirst[U_TUPLE, T_TUPLE, O](
        l: U_TUPLE => F[O]
    )(implicit replaceFirst: ReplaceFirstInTuple[T, U, T_TUPLE, U_TUPLE]): T_TUPLE => F[O] = { tTuple =>
      val t = replaceFirst.first(tTuple)
      a(t).flatMap { u =>
        val uTuple = replaceFirst.replace(tTuple, u)
        l(uTuple)
      }
    }
  }

  implicit class RichToMonadOfEitherFunction[T, U, E, F[_]: Monad](a: T => F[Either[E, U]]) {
    @deprecated
    def andThenFirstE[U_TUPLE, T_TUPLE, O](
        l: U_TUPLE => F[Either[E, O]]
    )(implicit replaceFirst: ReplaceFirstInTuple[T, U, T_TUPLE, U_TUPLE]): T_TUPLE => F[Either[E, O]] = { tTuple =>
      val t = replaceFirst.first(tTuple)
      a(t).flatMap {
        case Left(e) => implicitly[Monad[F]].point(Left(e))
        case Right(u) =>
          val uTuple = replaceFirst.replace(tTuple, u)
          l(uTuple)
      }
    }
  }
} 
Example 12
Source File: Http4sRequestToRawBody.scala (from tapir, Apache License 2.0)
package sttp.tapir.server.http4s

import java.io.ByteArrayInputStream

import cats.effect.{Blocker, ContextShift, Sync}
import cats.implicits._
import fs2.Chunk
import org.http4s.headers.{`Content-Disposition`, `Content-Type`}
import org.http4s.{Charset, EntityDecoder, Request, multipart}
import sttp.model.{Header, Part}
import sttp.tapir.{RawPart, RawBodyType}

class Http4sRequestToRawBody[F[_]: Sync: ContextShift](serverOptions: Http4sServerOptions[F]) {
  def apply[R](body: fs2.Stream[F, Byte], bodyType: RawBodyType[R], charset: Option[Charset], req: Request[F]): F[R] = {
    def asChunk: F[Chunk[Byte]] = body.compile.to(Chunk)
    def asByteArray: F[Array[Byte]] = body.compile.to(Chunk).map(_.toByteBuffer.array())

    bodyType match {
      case RawBodyType.StringBody(defaultCharset) => asByteArray.map(new String(_, charset.map(_.nioCharset).getOrElse(defaultCharset)))
      case RawBodyType.ByteArrayBody              => asByteArray
      case RawBodyType.ByteBufferBody             => asChunk.map(_.toByteBuffer)
      case RawBodyType.InputStreamBody            => asByteArray.map(new ByteArrayInputStream(_))
      case RawBodyType.FileBody =>
        serverOptions.createFile(serverOptions.blockingExecutionContext, req).flatMap { file =>
          val fileSink = fs2.io.file.writeAll(file.toPath, Blocker.liftExecutionContext(serverOptions.blockingExecutionContext))
          body.through(fileSink).compile.drain.map(_ => file)
        }
      case m: RawBodyType.MultipartBody =>
        // TODO: use MultipartDecoder.mixedMultipart once available?
        implicitly[EntityDecoder[F, multipart.Multipart[F]]].decode(req, strict = false).value.flatMap {
          case Left(failure) =>
            throw new IllegalArgumentException("Cannot decode multipart body: " + failure) // TODO
          case Right(mp) =>
            val rawPartsF: Vector[F[RawPart]] = mp.parts
              .flatMap(part => part.name.flatMap(name => m.partType(name)).map((part, _)).toList)
              .map { case (part, codecMeta) => toRawPart(part, codecMeta, req).asInstanceOf[F[RawPart]] }

            val rawParts: F[Vector[RawPart]] = rawPartsF.sequence

            rawParts.asInstanceOf[F[R]] // R is Seq[RawPart]
        }
    }
  }

  private def toRawPart[R](part: multipart.Part[F], partType: RawBodyType[R], req: Request[F]): F[Part[R]] = {
    val dispositionParams = part.headers.get(`Content-Disposition`).map(_.parameters).getOrElse(Map.empty)
    val charset = part.headers.get(`Content-Type`).flatMap(_.charset)
    apply(part.body, partType, charset, req)
      .map(r =>
        Part(
          part.name.getOrElse(""),
          r,
          otherDispositionParams = dispositionParams - Part.NameDispositionParam,
          headers = part.headers.toList.map(h => Header(h.name.value, h.value))
        )
      )
  }
} 
Example 13
Source File: EndpointToHttp4sServer.scala    From tapir   with Apache License 2.0 5 votes vote down vote up
package sttp.tapir.server.http4s

import cats.data._
import cats.effect.{ContextShift, Sync}
import cats.implicits._
import org.http4s.{EntityBody, HttpRoutes, Request, Response}
import org.log4s._
import sttp.tapir.monad.MonadError
import sttp.tapir.server.internal.{DecodeInputsResult, InputValues, InputValuesResult}
import sttp.tapir.server.{DecodeFailureContext, DecodeFailureHandling, ServerDefaults, ServerEndpoint, internal}
import sttp.tapir.{DecodeResult, Endpoint, EndpointIO, EndpointInput}

class EndpointToHttp4sServer[F[_]: Sync: ContextShift](serverOptions: Http4sServerOptions[F]) {
  private val outputToResponse = new OutputToHttp4sResponse[F](serverOptions)

  def toRoutes[I, E, O](se: ServerEndpoint[I, E, O, EntityBody[F], F]): HttpRoutes[F] = {
    val service: HttpRoutes[F] = HttpRoutes[F] { req: Request[F] =>
      def decodeBody(result: DecodeInputsResult): F[DecodeInputsResult] = {
        result match {
          case values: DecodeInputsResult.Values =>
            values.bodyInput match {
              case Some(bodyInput @ EndpointIO.Body(bodyType, codec, _)) =>
                new Http4sRequestToRawBody(serverOptions).apply(req.body, bodyType, req.charset, req).map { v =>
                  codec.decode(v) match {
                    case DecodeResult.Value(bodyV)     => values.setBodyInputValue(bodyV)
                    case failure: DecodeResult.Failure => DecodeInputsResult.Failure(bodyInput, failure): DecodeInputsResult
                  }
                }

              case None => (values: DecodeInputsResult).pure[F]
            }
          case failure: DecodeInputsResult.Failure => (failure: DecodeInputsResult).pure[F]
        }
      }

      def valueToResponse(value: Any): F[Response[F]] = {
        val i = value.asInstanceOf[I]
        se.logic(new CatsMonadError)(i)
          .map {
            case Right(result) => outputToResponse(ServerDefaults.StatusCodes.success, se.endpoint.output, result)
            case Left(err)     => outputToResponse(ServerDefaults.StatusCodes.error, se.endpoint.errorOutput, err)
          }
          .flatTap { response => serverOptions.logRequestHandling.requestHandled(se.endpoint, response.status.code) }
          .onError {
            case e: Exception => serverOptions.logRequestHandling.logicException(se.endpoint, e)
          }
      }

      OptionT(decodeBody(internal.DecodeInputs(se.endpoint.input, new Http4sDecodeInputsContext[F](req))).flatMap {
        case values: DecodeInputsResult.Values =>
          InputValues(se.endpoint.input, values) match {
            case InputValuesResult.Value(params, _)        => valueToResponse(params.asAny).map(_.some)
            case InputValuesResult.Failure(input, failure) => handleDecodeFailure(se.endpoint, input, failure)
          }
        case DecodeInputsResult.Failure(input, failure) => handleDecodeFailure(se.endpoint, input, failure)
      })
    }

    service
  }

  def toRoutes[I, E, O](serverEndpoints: List[ServerEndpoint[_, _, _, EntityBody[F], F]]): HttpRoutes[F] = {
    NonEmptyList.fromList(serverEndpoints.map(se => toRoutes(se))) match {
      case Some(routes) => routes.reduceK
      case None         => HttpRoutes.empty
    }
  }

  private def handleDecodeFailure[I](
      e: Endpoint[_, _, _, _],
      input: EndpointInput[_],
      failure: DecodeResult.Failure
  ): F[Option[Response[F]]] = {
    val decodeFailureCtx = DecodeFailureContext(input, failure)
    val handling = serverOptions.decodeFailureHandler(decodeFailureCtx)
    handling match {
      case DecodeFailureHandling.NoMatch =>
        serverOptions.logRequestHandling.decodeFailureNotHandled(e, decodeFailureCtx).map(_ => None)
      case DecodeFailureHandling.RespondWithResponse(output, value) =>
        serverOptions.logRequestHandling
          .decodeFailureHandled(e, decodeFailureCtx, value)
          .map(_ => Some(outputToResponse(ServerDefaults.StatusCodes.error, output, value)))
    }
  }

  private class CatsMonadError(implicit F: cats.MonadError[F, Throwable]) extends MonadError[F] {
    override def unit[T](t: T): F[T] = F.pure(t)
    override def map[T, T2](fa: F[T])(f: T => T2): F[T2] = F.map(fa)(f)
    override def flatMap[T, T2](fa: F[T])(f: T => F[T2]): F[T2] = F.flatMap(fa)(f)
    override def error[T](t: Throwable): F[T] = F.raiseError(t)
    override protected def handleWrappedError[T](rt: F[T])(h: PartialFunction[Throwable, F[T]]): F[T] = F.recoverWith(rt)(h)
  }
}

object EndpointToHttp4sServer {
  private[http4s] val log: Logger = getLogger
} 
Example 14
Source File: LaserdiscFs2Suite.scala    From laserdisc   with MIT License 5 votes vote down vote up
package laserdisc
package fs2

import cats.effect.syntax.effect._
import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import laserdisc.auto._
import munit.FunSuite

abstract class LaserdiscFs2Suite[F[_]: ContextShift: Timer: ConcurrentEffect](p: Port) extends FunSuite {

  private var cleanUp: F[Unit]               = _
  protected final var client: RedisClient[F] = _

  override final def beforeAll(): Unit = {
    val (cl, cu) = RedisClient.to("127.0.0.1", p).allocated.toIO.unsafeRunSync()
    cleanUp = cu
    client = cl
  }

  override final def afterAll(): Unit =
    cleanUp.toIO.unsafeRunSync()

  protected def assertAllEqual[A](as: List[A], a: A): Unit =
    as.foreach(assertEquals(_, a))
} 
Example 15
Source File: FinatraServerTests.scala (from tapir, Apache License 2.0)
package sttp.tapir.server.finatra

import cats.data.NonEmptyList
import cats.effect.{ContextShift, IO, Resource, Timer}
import com.github.ghik.silencer.silent
import com.twitter.finagle.http.Request
import com.twitter.finatra.http.filters.{AccessLoggingFilter, ExceptionMappingFilter}
import com.twitter.finatra.http.{Controller, EmbeddedHttpServer, HttpServer}
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.util.{Future, FuturePool}
import sttp.tapir.Endpoint
import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint}
import sttp.tapir.server.tests.ServerTests
import sttp.tapir.tests.{Port, PortCounter}

import scala.concurrent.ExecutionContext
import scala.reflect.ClassTag
import scala.concurrent.duration._

class FinatraServerTests extends ServerTests[Future, Nothing, FinatraRoute] {
  override def streamingSupport: Boolean = false

  private val futurePool = FuturePool.unboundedPool

  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)
  implicit val timer: Timer[IO] = IO.timer(ec)

  override def pureResult[T](t: T): Future[T] = Future.value(t)

  override def suspendResult[T](t: => T): Future[T] =
    futurePool {
      t
    }

  override def route[I, E, O](
      e: ServerEndpoint[I, E, O, Nothing, Future],
      decodeFailureHandler: Option[DecodeFailureHandler] = None
  ): FinatraRoute = {
    implicit val serverOptions: FinatraServerOptions =
      FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler))
    e.toRoute
  }

  override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => Future[O])(implicit
      eClassTag: ClassTag[E]
  ): FinatraRoute = {
    e.toRouteRecoverErrors(fn)
  }

  override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port)

  override lazy val portCounter: PortCounter = new PortCounter(58000)
}

object FinatraServerTests {
  def server(routes: NonEmptyList[FinatraRoute], port: Port)(implicit ioTimer: Timer[IO]): Resource[IO, Unit] = {
    def waitUntilHealthy(s: EmbeddedHttpServer, count: Int): IO[EmbeddedHttpServer] =
      if (s.isHealthy) IO.pure(s)
      else if (count > 1000) IO.raiseError(new IllegalStateException("Server unhealthy"))
      else IO.sleep(10.milliseconds).flatMap(_ => waitUntilHealthy(s, count + 1))

    val bind = IO {
      class TestController extends Controller with TapirController {
        routes.toList.foreach(addTapirRoute)
      }

      class TestServer extends HttpServer {
        @silent("discarded")
        override protected def configureHttp(router: HttpRouter): Unit = {
          router
            .filter[AccessLoggingFilter[Request]]
            .filter[ExceptionMappingFilter[Request]]
            .add(new TestController)
        }
      }

      val server = new EmbeddedHttpServer(
        new TestServer,
        Map(
          "http.port" -> s":$port"
        ),
        // in the default implementation waitForWarmup suspends the thread for 1 second between healthy checks
        // we improve on that by checking every 10ms
        waitForWarmup = false
      )
      server.start()
      server
    }.flatMap(waitUntilHealthy(_, 0))

    Resource
      .make(bind)(httpServer => IO(httpServer.close()))
      .map(_ => ())
  }
} 
Example 16
Source File: SttpClientTests.scala (from tapir, Apache License 2.0)
package sttp.tapir.client.sttp

import cats.effect.{ContextShift, IO}
import cats.implicits._
import sttp.tapir.{DecodeResult, Endpoint}
import sttp.tapir.client.tests.ClientTests
import sttp.client._
import sttp.client.asynchttpclient.fs2.AsyncHttpClientFs2Backend

import scala.concurrent.ExecutionContext

class SttpClientTests extends ClientTests[fs2.Stream[IO, Byte]] {
  private implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.Implicits.global)
  private implicit val backend: SttpBackend[IO, fs2.Stream[IO, Byte], NothingT] = AsyncHttpClientFs2Backend[IO]().unsafeRunSync()

  override def mkStream(s: String): fs2.Stream[IO, Byte] = fs2.Stream.emits(s.getBytes("utf-8"))
  override def rmStream(s: fs2.Stream[IO, Byte]): String =
    s.through(fs2.text.utf8Decode)
      .compile
      .foldMonoid
      .unsafeRunSync()

  override def send[I, E, O, FN[_]](e: Endpoint[I, E, O, fs2.Stream[IO, Byte]], port: Port, args: I): IO[Either[E, O]] = {
    e.toSttpRequestUnsafe(uri"http://localhost:$port").apply(args).send().map(_.body)
  }

  override def safeSend[I, E, O, FN[_]](
      e: Endpoint[I, E, O, fs2.Stream[IO, Byte]],
      port: Port,
      args: I
  ): IO[DecodeResult[Either[E, O]]] = {
    e.toSttpRequest(uri"http://localhost:$port").apply(args).send().map(_.body)
  }

  override protected def afterAll(): Unit = {
    backend.close().unsafeRunSync()
    super.afterAll()
  }
} 
Example 17
Source File: SwaggerHttp4s.scala (from tapir, Apache License 2.0)
package sttp.tapir.swagger.http4s

import java.util.Properties

import cats.effect.{Blocker, ContextShift, Sync}
import org.http4s.{HttpRoutes, StaticFile, Uri}
import org.http4s.dsl.Http4sDsl
import org.http4s.headers.Location

import scala.concurrent.ExecutionContext


class SwaggerHttp4s(
    yaml: String,
    contextPath: String = "docs",
    yamlName: String = "docs.yaml",
    redirectQuery: Map[String, Seq[String]] = Map.empty
) {
  private val swaggerVersion = {
    val p = new Properties()
    val pomProperties = getClass.getResourceAsStream("/META-INF/maven/org.webjars/swagger-ui/pom.properties")
    try p.load(pomProperties)
    finally pomProperties.close()
    p.getProperty("version")
  }

  def routes[F[_]: ContextShift: Sync]: HttpRoutes[F] = {
    val dsl = Http4sDsl[F]
    import dsl._

    HttpRoutes.of[F] {
      case path @ GET -> Root / `contextPath` =>
        val queryParameters = Map("url" -> Seq(s"${path.uri}/$yamlName")) ++ redirectQuery
        Uri
          .fromString(s"${path.uri}/index.html")
          .map(uri => uri.setQueryParams(queryParameters))
          .map(uri => PermanentRedirect(Location(uri)))
          .getOrElse(NotFound())
      case GET -> Root / `contextPath` / `yamlName` =>
        Ok(yaml)
      case GET -> Root / `contextPath` / swaggerResource =>
        StaticFile
          .fromResource(
            s"/META-INF/resources/webjars/swagger-ui/$swaggerVersion/$swaggerResource",
            Blocker.liftExecutionContext(ExecutionContext.global)
          )
          .getOrElseF(NotFound())
    }
  }
} 
Example 18
Source File: RedocHttp4s.scala (from tapir, Apache License 2.0)
package sttp.tapir.redoc.http4s

import cats.effect.{ContextShift, Sync}
import org.http4s.dsl.Http4sDsl
import org.http4s.headers._
import org.http4s.{Charset, HttpRoutes, MediaType}

import scala.io.Source


class RedocHttp4s(title: String, yaml: String, yamlName: String = "docs.yaml") {
  private lazy val html = {
    val fileName = "redoc.html"
    val is = getClass.getClassLoader.getResourceAsStream(fileName)
    assert(Option(is).nonEmpty, s"Could not find file ${fileName} on classpath.")
    val rawHtml = Source.fromInputStream(is).mkString
    // very poor man's templating engine
    rawHtml.replaceAllLiterally("{{docsPath}}", yamlName).replaceAllLiterally("{{title}}", title)
  }

  def routes[F[_]: ContextShift: Sync]: HttpRoutes[F] = {
    val dsl = Http4sDsl[F]
    import dsl._

    HttpRoutes.of[F] {
      case req @ GET -> Root if req.pathInfo.endsWith("/") =>
        Ok(html, `Content-Type`(MediaType.text.html, Charset.`UTF-8`))
      // as the url to the yaml file is relative, it is important that there is a trailing slash
      case req @ GET -> Root =>
        val uri = req.uri
        PermanentRedirect(Location(uri.withPath(uri.path.concat("/"))))
      case GET -> Root / `yamlName` =>
        Ok(yaml, `Content-Type`(MediaType.text.yaml, Charset.`UTF-8`))
    }
  }
} 
Example 19
Source File: IoAdapt.scala (from http4s-poc-api, MIT License)
package external
package library

import cats.arrow.FunctionK
import cats.effect.{Concurrent, ContextShift, IO}
import external.library.IoAdapt.-->
import zio.{Task, ZIO}

import scala.concurrent.Future


trait IoAdapt[F[_], G[_]] {
  def apply[A]: (=>F[A]) => G[A]

  def functionK: FunctionK[F, G] =
    λ[FunctionK[F, G]](apply(_))
}

private[library] sealed trait IoAdaptInstances {
  implicit def catsIoToZioTask(implicit cc: Concurrent[Task]): IO --> Task =
    new IoAdapt[IO, Task] {
      def apply[A]: (=>IO[A]) => Task[A] =
        io => cc.liftIO(io)
    }

  implicit val futureToZioTask: Future --> Task =
    new IoAdapt[Future, Task] {
      def apply[A]: (=>Future[A]) => Task[A] =
        ft => ZIO.fromFuture(ec => ft.map(identity)(ec))
    }

  implicit def futureToIo(implicit cs: ContextShift[IO]): Future --> IO =
    new IoAdapt[Future, IO] {
      def apply[A]: (=>Future[A]) => IO[A] =
        IO.fromFuture[A] _ compose IO.delay
    }
}

object IoAdapt extends IoAdaptInstances {
  type -->[F[_], G[_]] = IoAdapt[F, G]
} 
Example 20
Source File: PriceService.scala (from http4s-poc-api, MIT License)
package service

import cats.Parallel
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.apply._
import cats.syntax.flatMap._
import cats.syntax.parallel._
import external.library.IoAdapt.-->
import external.{TeamOneHttpApi, TeamThreeCacheApi, TeamTwoHttpApi}
import integration.{CacheIntegration, ProductIntegration, UserIntegration}
import log.effect.LogWriter
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration._

final case class PriceService[F[_]: Concurrent: Timer: ContextShift: Parallel[*[_]]](
  cacheDep: TeamThreeCacheApi[ProductId, Product],
  teamOneStupidName: TeamOneHttpApi,
  teamTwoStupidName: TeamTwoHttpApi,
  logger: LogWriter[F]
)(
  implicit ev1: IO --> F,
  ev2: Future --> F
) {
  private[this] val cache      = CacheIntegration[F](cacheDep, 10.seconds)
  private[this] val userInt    = UserIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds)
  private[this] val productInt = ProductIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds)

  private[this] lazy val productRepo: ProductRepo[F]             = ProductRepo(cache, productInt, logger)
  private[this] lazy val priceCalculator: PriceCalculator[F]     = PriceCalculator(productInt, logger)
  private[this] lazy val preferenceFetcher: PreferenceFetcher[F] = PreferenceFetcher(userInt, logger)

  
  def prices(userId: UserId, productIds: Seq[ProductId]): F[List[Price]] =
    (userFor(userId), productsFor(productIds), preferencesFor(userId))
      .parMapN(priceCalculator.finalPrices)
      .flatten

  private[this] def userFor(userId: UserId): F[User] =
    logger.debug(s"Collecting user details for id $userId") >>
      userInt.user(userId) <*
      logger.debug(s"User details collected for id $userId")

  private[this] def preferencesFor(userId: UserId): F[UserPreferences] =
    logger.debug(s"Looking up user preferences for user $userId") >>
      preferenceFetcher.userPreferences(userId) <*
      logger.debug(s"User preferences look up for $userId completed")

  private[this] def productsFor(productIds: Seq[ProductId]): F[List[Product]] =
    logger.debug(s"Collecting product details for products $productIds") >>
      productRepo.storedProducts(productIds) <*
      logger.debug(s"Product details collection for $productIds completed")
} 
Example 21
Source File: UserIntegration.scala (from http4s-poc-api, MIT License)
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{PreferenceErr, UserErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait UserIntegration[F[_]] {
  def user: UserId => F[User]
  def usersPreferences: UserId => F[UserPreferences]
}

object UserIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    userDep: TeamTwoHttpApi,
    preferencesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): UserIntegration[F] =
    new UserIntegration[F] {
      def user: UserId => F[User] = { id =>
        CS.shift >> userDep.user(id).adaptedTo[F].timeout(t).narrowFailureTo[UserErr]
      }

      def usersPreferences: UserId => F[UserPreferences] = { id =>
        CS.shift >> preferencesDep.usersPreferences(id).adaptedTo[F].timeout(t).narrowFailureTo[PreferenceErr]
      }
    }
} 
Example 22
Source File: CacheIntegration.scala (from http4s-poc-api, MIT License)
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{CacheLookupError, CacheStoreError}
import external.TeamThreeCacheApi
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.duration.FiniteDuration

sealed trait CacheIntegration[F[_]] {
  def cachedProduct: ProductId => F[Option[Product]]
  def storeProductToCache: ProductId => Product => F[Unit]
}

object CacheIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]](
    cache: TeamThreeCacheApi[ProductId, Product],
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): CacheIntegration[F] =
    new CacheIntegration[F] {
      def cachedProduct: ProductId => F[Option[Product]] =
        pId => CS.shift >> cache.get(pId).adaptedTo[F].timeout(t).narrowFailureTo[CacheLookupError]

      def storeProductToCache: ProductId => Product => F[Unit] =
        pId => p => CS.shift >> cache.put(pId)(p).adaptedTo[F].timeout(t).narrowFailureTo[CacheStoreError]
    }
} 
Example 23
Source File: ProductIntegration.scala (from http4s-poc-api, MIT License)
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{ProductErr, ProductPriceErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait ProductIntegration[F[_]] {
  def product: ProductId => F[Option[Product]]
  def productPrice: Product => UserPreferences => F[Price]
}

object ProductIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    productDep: TeamTwoHttpApi,
    pricesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): ProductIntegration[F] =
    new ProductIntegration[F] {
      def product: ProductId => F[Option[Product]] = { ps =>
        CS.shift >> productDep.product(ps).adaptedTo[F].timeout(t).narrowFailureTo[ProductErr]
      }

      def productPrice: Product => UserPreferences => F[Price] = { p => pref =>
        CS.shift >> pricesDep.productPrice(p)(pref).adaptedTo[F].timeout(t).narrowFailureTo[ProductPriceErr]
      }
    }
} 
Example 24
Source File: CatsIoTestRunner.scala (from laserdisc, MIT License)
package laserdisc
package fs2

import java.util.concurrent.{Executors, TimeUnit}

import cats.effect.{ContextShift, IO, Timer}
import cats.syntax.flatMap._
import laserdisc.auto._
import log.effect.fs2.SyncLogWriter.consoleLogUpToLevel
import log.effect.{LogLevels, LogWriter}

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

object CatsIoTestRunner extends TestCases {

  private[this] val ec: ExecutionContext = fromExecutor(Executors.newFixedThreadPool(8))

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)
  private[this] implicit val logWriter: LogWriter[IO]       = consoleLogUpToLevel(LogLevels.Error)

  def main(args: Array[String]): Unit = {

    val task = timer.clock.monotonic(TimeUnit.MINUTES) >>= { start: Long =>
      RedisClient.to("localhost", 6379).use { cl =>
        def loop(count: Long): IO[Long] =
          case1(cl) >> timer.clock.monotonic(TimeUnit.MINUTES) >>= { current =>
            if (current - start >= 2) IO.pure(count)
            else loop(count + 1)
          }

        loop(0)
      }
    }

    println(s"Avg send/s: ${task.unsafeRunSync() * 24.0 / 2 / 60}")
    sys.exit()
  }
} 
Example 25
Source File: DefaultLoggerSpec.scala (from laserdisc, MIT License)
import java.util.concurrent.ForkJoinPool

import cats.effect.{ContextShift, IO, Timer}
import munit.FunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

final class DefaultLoggerSpec extends FunSuite with TestLogCapture {

  private def assertNot(c: =>Boolean): Unit = assert(!c)

  private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool())

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)

  test("The readme example doesn't log when no LogWriter is given") {
    import cats.syntax.flatMap._
    import laserdisc._
    import laserdisc.all._
    import laserdisc.auto._
    import laserdisc.fs2._
    import log.effect.fs2.SyncLogWriter.consoleLog

    val redisTest: IO[Unit] =
      RedisClient.to("localhost", 6379).use { client =>
        client.send(
          set("a", 23),
          set("b", 55),
          get[PosInt]("b"),
          get[PosInt]("a")
        ) >>= {
          case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 =>
            consoleLog[IO].info("yay!")
          case other =>
            consoleLog[IO].error(s"something went terribly wrong $other") >>
              IO.raiseError(new RuntimeException("boom"))
        }
      }

    val logged = capturedConsoleOutOf(redisTest)

    assertNot(logged contains "Starting connection")
    assertNot(logged contains "Server available for publishing: localhost:6379")
    assertNot(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))")
    assertNot(logged contains "receiving Str(OK)")
    assertNot(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))")
    assertNot(logged contains "receiving Str(OK)")
    assertNot(logged contains "sending Arr(Bulk(GET),Bulk(b))")
    assertNot(logged contains "receiving Bulk(55)")
    assertNot(logged contains "sending Arr(Bulk(GET),Bulk(a))")
    assertNot(logged contains "receiving Bulk(23)")
    assertNot(logged contains "Shutting down connection")
    assertNot(logged contains "Shutdown complete")
    assertNot(logged contains "Connection terminated: No issues")
  }
} 
Example 26
Source File: ReadmeExampleSpec.scala (from laserdisc, MIT License)
import java.util.concurrent.ForkJoinPool

import cats.effect.{ContextShift, IO, Timer}
import munit.FunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

final class ReadmeExampleSpec extends FunSuite with TestLogCapture {

  private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool())

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)

  test("The readme example gives the expected output and logs when a LogWriter is in scope") {
    import cats.syntax.flatMap._
    import laserdisc._
    import laserdisc.all._
    import laserdisc.auto._
    import laserdisc.fs2._
    import log.effect.LogWriter
    import log.effect.fs2.SyncLogWriter

    def redisTest(implicit log: LogWriter[IO]): IO[Unit] =
      RedisClient.to("localhost", 6379).use { client =>
        client.send(
          set("a", 23),
          set("b", 55),
          get[PosInt]("b"),
          get[PosInt]("a")
        ) >>= {
          case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 =>
            log info "yay!"
          case other =>
            log.error(s"something went terribly wrong $other") >>
              IO.raiseError(new RuntimeException("boom"))
        }
      }

    val logged = capturedConsoleOutOf {
      redisTest(SyncLogWriter.consoleLog[IO])
    }

    assert(logged contains "Starting connection")
    assert(logged contains "Connected to server localhost:6379")
    assert(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))")
    assert(logged contains "receiving Str(OK)")
    assert(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))")
    assert(logged contains "receiving Str(OK)")
    assert(logged contains "sending Arr(Bulk(GET),Bulk(b))")
    assert(logged contains "receiving Bulk(55)")
    assert(logged contains "sending Arr(Bulk(GET),Bulk(a))")
    assert(logged contains "receiving Bulk(23)")
    assert(logged contains "yay!")
    assert(logged contains "Shutting down connection")
    assert(logged contains "Shutdown complete")
    assert(logged contains "Connection terminated: No issues")
  }
} 
Example 27
Source File: implicits.scala (from iotchain, MIT License)
package jbok.network.tcp

import cats.effect.{Concurrent, ContextShift, IO, Sync}
import cats.implicits._
import fs2.Chunk
import fs2.io.tcp.Socket
import javax.net.ssl.SSLContext
import jbok.common.thread.ThreadUtil
import jbok.crypto.ssl.SSLContextHelper
import jbok.network.Message
import spinoco.fs2.crypto.io.tcp.TLSSocket

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object implicits {
  val maxBytes: Int           = 4 * 1024 * 1024
  val timeout                 = Some(10.seconds)
  val sslEC: ExecutionContext = ThreadUtil.blockingThreadPool[IO]("jbok-tls").allocated.unsafeRunSync()._1

  implicit class TcpSocketOps[F[_]](val socket: Socket[F]) extends AnyVal {
    def readMessage(implicit F: Sync[F]): F[Message[F]] =
      socket.read(maxBytes, timeout).flatMap {
        case Some(chunk) => Message.decodeChunk(chunk)
        case None        => F.raiseError(new Exception(s"socket already closed"))
      }

    def writeMessage(message: Message[F]): F[Unit] =
      socket.write(Chunk.array(Message.encodeBytes(message).byteArray), timeout)

    def toTLSSocket(sslOpt: Option[SSLContext], client: Boolean)(implicit F: Concurrent[F], cs: ContextShift[F]): F[Socket[F]] =
      sslOpt match {
        case Some(ssl) =>
          if (client) TLSSocket.instance(socket, SSLContextHelper.clientEngine(ssl).engine, sslEC).widen[Socket[F]]
          else TLSSocket.instance(socket, SSLContextHelper.serverEngine(ssl).engine, sslEC).widen[Socket[F]]
        case None => F.pure(socket)
      }
  }
} 
Example 28
Source File: Doobie.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.app.service.store.doobie

import cats.effect.{Async, ContextShift, Resource}
import doobie._
import doobie.hikari.HikariTransactor
import jbok.core.config.DatabaseConfig

object Doobie {
  def xa[F[_]](config: DatabaseConfig)(implicit F: Async[F], cs: ContextShift[F]): Resource[F, Transactor[F]] =
    for {
      ce <- ExecutionContexts.fixedThreadPool[F](32) // our connect EC
      te <- ExecutionContexts.cachedThreadPool[F]    // our transaction EC
      xa <- HikariTransactor.newHikariTransactor[F](
        config.driver,
        config.url,
        config.user,     // username
        config.password, // password
        ce,              // await connection here
        te               // execute JDBC operations here
      )
    } yield xa
} 
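To see how this transactor factory is meant to be consumed, here is a hedged usage sketch; the concrete fields of jbok's DatabaseConfig are not shown in this excerpt, so the config value is left as a placeholder:

import cats.effect.{ContextShift, IO}
import doobie.implicits._
import jbok.app.service.store.doobie.Doobie
import jbok.core.config.DatabaseConfig

import scala.concurrent.ExecutionContext

object DoobieXaUsage {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val config: DatabaseConfig = ??? // placeholder; supply real connection settings

  // Resource.use tears down both thread pools and the Hikari source afterwards.
  val one: IO[Int] =
    Doobie.xa[IO](config).use(xa => sql"select 1".query[Int].unique.transact(xa))
}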
Example 29
Source File: PostgresInstanceSpec.scala    From fuuid   with MIT License 5 votes vote down vote up
package io.chrisdavenport.fuuid.doobie.postgres

import cats.effect.{ContextShift, IO}
import cats.implicits._
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import doobie.specs2._
import io.chrisdavenport.fuuid.FUUID
import io.chrisdavenport.fuuid.doobie.implicits._
import io.chrisdavenport.testcontainersspecs2.ForAllTestContainer
import com.dimafeng.testcontainers.GenericContainer
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy
import java.time.Duration
import java.time.temporal.ChronoUnit.SECONDS
import org.specs2._
import scala.concurrent.ExecutionContext.Implicits.global

class PostgresInstanceSpec extends mutable.Specification with IOChecker with ForAllTestContainer {
  sequential
  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  override lazy val container = GenericContainer(
    "postgres",
    List(5432),
    Map(
      "POSTGRES_DB" -> dbName,
      "POSTGRES_USER" -> dbUserName,
      "POSTGRES_PASSWORD" -> dbPassword
    ),
    waitStrategy = new LogMessageWaitStrategy()
      .withRegEx(".*database system is ready to accept connections.*\\s")
      .withTimes(2)
      .withStartupTimeout(Duration.of(60, SECONDS))
  )

  lazy val driverName = "org.postgresql.Driver"
  lazy val jdbcUrl = s"jdbc:postgresql://${container.container.getContainerIpAddress()}:${container.container.getMappedPort(5432)}/${dbName}"
  lazy val dbUserName = "user"
  lazy val dbPassword = "password"
  lazy val dbName = "db"

  lazy val transactor = Transactor.fromDriverManager[IO](
    driverName,
    jdbcUrl,
    dbUserName,
    dbPassword
  )


  override def afterStart(): Unit = {
    sql"""
    CREATE TABLE IF NOT EXISTS PostgresInstanceSpec (
      id   UUID NOT NULL
    )
    """.update.run.transact(transactor).void.unsafeRunSync()
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into PostgresInstanceSpec (id) VALUES ($fuuid)""".update
  }
  val fuuid = FUUID.randomFUUID[IO].unsafeRunSync

  check(sql"SELECT id from PostgresInstanceSpec".query[FUUID])
  check(insertId(fuuid))

} 
Example 30
Source File: PostgresTraversalSpec.scala    From fuuid   with MIT License 5 votes vote down vote up
package io.chrisdavenport.fuuid.doobie.postgres

import cats.effect.{ContextShift, IO}
import cats.implicits._
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import io.chrisdavenport.fuuid.doobie.implicits._
import io.chrisdavenport.fuuid._
import io.chrisdavenport.testcontainersspecs2.ForAllTestContainer
import com.dimafeng.testcontainers.GenericContainer
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy
import java.time.Duration
import java.time.temporal.ChronoUnit.SECONDS
import org.specs2._
import scala.concurrent.ExecutionContext.Implicits.global

class PostgresTraversalSpec extends mutable.Specification
  with ScalaCheck with FUUIDArbitraries with ForAllTestContainer {
  sequential
  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  override lazy val container = GenericContainer(
    "postgres",
    List(5432),
    Map(
      "POSTGRES_DB" -> dbName,
      "POSTGRES_USER" -> dbUserName,
      "POSTGRES_PASSWORD" -> dbPassword
    ),
    waitStrategy = new LogMessageWaitStrategy()
      .withRegEx(".*database system is ready to accept connections.*\\s")
      .withTimes(2)
      .withStartupTimeout(Duration.of(60, SECONDS))
  )

  lazy val driverName = "org.postgresql.Driver"
  lazy val jdbcUrl = s"jdbc:postgresql://${container.container.getContainerIpAddress()}:${container.container.getMappedPort(5432)}/${dbName}"
  lazy val dbUserName = "user"
  lazy val dbPassword = "password"
  lazy val dbName = "db"

  lazy val transactor = Transactor.fromDriverManager[IO](
    driverName,
    jdbcUrl,
    dbUserName,
    dbPassword
  )

  // lazy val transactor = Transactor.fromDriverManager[IO](
  //   "org.postgresql.Driver",
  //   "jdbc:postgresql:world",
  //   "postgres", ""
  // )

  override def afterStart(): Unit = {
    sql"""
    CREATE TABLE IF NOT EXISTS PostgresTraversalSpec (
      id   UUID NOT NULL
    )
    """.update.run.transact(transactor).void.unsafeRunSync()
  }

  def queryBy(fuuid: FUUID): Query0[FUUID] = {
    sql"""SELECT id from PostgresTraversalSpec where id = ${fuuid}""".query[FUUID]
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into PostgresTraversalSpec (id) VALUES ($fuuid)""".update
  }

  "Doobie Postgres Meta" should {
    "traverse input and then extraction" in prop { fuuid: FUUID =>

      val action = for {
        _ <- insertId(fuuid).run.transact(transactor)
        fuuid <- queryBy(fuuid).unique.transact(transactor)
      } yield fuuid

      action.unsafeRunSync must_=== fuuid
    }
    "fail on a non-present value" in prop { fuuid: FUUID =>
      queryBy(fuuid)
        .unique
        .transact(transactor)
        .attempt
        .map(_.isLeft)
        .unsafeRunSync must_=== true
    }
  }

} 
Example 31
Source File: H2TraversalSpec.scala    From fuuid   with MIT License 5 votes vote down vote up
package io.chrisdavenport.fuuid.doobie.h2

import cats.effect.{ContextShift, IO}
import cats.implicits._
import doobie._
import doobie.h2.implicits._
import doobie.implicits._
import io.chrisdavenport.fuuid.doobie.implicits._
import io.chrisdavenport.fuuid._
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAll
import scala.concurrent.ExecutionContext.Implicits.global

class H2TraversalSpec extends Specification
  with BeforeAll with ScalaCheck with FUUIDArbitraries {

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  lazy val transactor: Transactor[IO] =
    Transactor.fromDriverManager[IO](
      driver = "org.h2.Driver",
      url = "jdbc:h2:mem:testH2Table;DB_CLOSE_DELAY=-1",
      user = "sa",
      pass = ""
    )

  def beforeAll(): Unit = {
    sql"""
    CREATE TABLE testH2Table (
      id   UUID NOT NULL
    )
    """.update.run.transact(transactor).void.unsafeRunSync
  }

  def queryBy(fuuid: FUUID): Query0[FUUID] = {
    sql"""SELECT id from testH2Table where id = ${fuuid}""".query[FUUID]
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into testH2Table (id) VALUES ($fuuid)""".update
  }

  "Doobie H2 Meta" should {

    "traverse input and then extraction" in prop { fuuid: FUUID =>

      val action = for {
        _ <- insertId(fuuid).run.transact(transactor)
        fuuid <- queryBy(fuuid).unique.transact(transactor)
      } yield fuuid

      action.unsafeRunSync must_=== fuuid
    }

    "fail on a non-present value" in prop { fuuid: FUUID =>
      queryBy(fuuid)
        .unique
        .transact(transactor)
        .attempt
        .map(_.isLeft)
        .unsafeRunSync must_=== true
    }
  }

} 
Example 32
Source File: H2InstanceSpec.scala    From fuuid   with MIT License 5 votes vote down vote up
package io.chrisdavenport.fuuid.doobie.h2

import cats.effect.{ContextShift, IO}
import cats.syntax.functor._
import doobie._
import doobie.h2.implicits._
import doobie.implicits._
import doobie.specs2._
import io.chrisdavenport.fuuid.FUUID
import io.chrisdavenport.fuuid.doobie.implicits._
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAll
import scala.concurrent.ExecutionContext.Implicits.global

class H2InstanceSpec extends Specification with IOChecker with BeforeAll {

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  lazy val transactor: Transactor[IO] =
    Transactor.fromDriverManager[IO](
      driver = "org.h2.Driver",
      url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1",
      user = "sa",
      pass = ""
    )

  def beforeAll(): Unit = {
    sql"CREATE TABLE test (id UUID NOT NULL)".update.run.transact(transactor).void.unsafeRunSync
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into test (id) VALUES ($fuuid)""".update
  }

  val fuuid = FUUID.randomFUUID[IO].unsafeRunSync

  check(sql"SELECT id from test".query[FUUID])
  check(insertId(fuuid))

} 
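Examples 29 through 32 share one moving part worth isolating: Transactor.fromDriverManager[IO] summons the implicit ContextShift[IO] so that JDBC calls are shifted off the calling pool and control is shifted back afterwards. A minimal self-contained sketch (H2 in-memory, illustrative names):

import cats.effect.{ContextShift, IO}
import doobie._
import doobie.implicits._

import scala.concurrent.ExecutionContext

object MinimalTransactorDemo {
  // fromDriverManager requires an implicit ContextShift[IO] to do its shifting.
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val xa: Transactor[IO] =
    Transactor.fromDriverManager[IO]("org.h2.Driver", "jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1", "sa", "")

  val answer: Int = sql"select 42".query[Int].unique.transact(xa).unsafeRunSync()
}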
Example 33
Source File: ServerSentEventsTests.scala    From lolhttp   with Apache License 2.0 5 votes vote down vote up
package lol.http


import cats.implicits._
import cats.effect.{ContextShift, IO, Timer}
import fs2.concurrent.SignallingRef
import fs2.{Chunk, Stream}
import lol.http.ServerSentEvents._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class ServerSentEventsTests extends Tests {

  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(ec)
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)

  val App: Service = {
    case url"/" =>
      Ok("Hello")
    case url"/stream" =>
      Ok(Stream(Event("Hello"), Event("World")).covaryAll[IO, Event[String]])
    case url"/fakeStream" =>
      Ok("Hello").addHeaders(h"Content-Type" -> h"text/event-stream")
  }

  test("Valid string events stream") {
    withServer(Server.listen()(App)) { server =>
      await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/stream")) { response =>
            response.readAs[Stream[IO,Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be (List(Event("Hello"), Event("World")))
    }
  }

  test("Events stream should be stopped by server when client closes the connection") {
    val isRunning = SignallingRef[IO, Boolean](true).unsafeRunSync()

    val App: Service = {
      case url"/infiniteStream" =>
        val infiniteStream =
          Stream.sleep[IO](100.milliseconds).flatMap(_ => Stream.chunk(Chunk.bytes("LOL\n".getBytes("utf-8")))).repeat
        Ok(Content(infiniteStream.onFinalize(isRunning.set(false))))
    }

    withServer(Server.listen()(App)) { server =>
      await() {
        val client = Client("localhost", server.port)
        (IO.sleep(1.second) >> IO(client.stopSync())).unsafeRunAsync(_ => ())
        client.run(Get("/infiniteStream")) { response =>
          response.readAs[String]
        }
      }

      eventually({
        val t = isRunning.get.unsafeRunSync()
        t shouldBe false
      })
    }
  }

  test("Not an events stream") {
    withServer(Server.listen()(App)) { server =>
      the [Error] thrownBy await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/")) { response =>
            response.readAs[Stream[IO,Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be (Error.UnexpectedContentType())
    }
  }

  test("Invalid events stream ") {
    withServer(Server.listen()(App)) { server =>
      await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/fakeStream")) { response =>
            response.readAs[Stream[IO,Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be (Nil)
    }
  }

} 
Example 34
Source File: StreamingTests.scala    From lolhttp   with Apache License 2.0 5 votes vote down vote up
package lol.http.examples

import lol.http._

import cats.effect.{ContextShift, IO, Timer}
import fs2.{Chunk, Pipe, Pull, Stream}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class StreamingTests extends Tests {

  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(ec)
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)

  def now = System.currentTimeMillis
  val `10Meg` = 10 * 1024 * 1024

  // Transform the stream into packets of 10Meg
  def rechunk: Pipe[IO,Byte,Chunk[Byte]] =
    _.repeatPull(_.unconsN(`10Meg`, true).flatMap {
      case Some((chunks, h)) =>
        Pull.output1(chunks) as Some(h)
      case None =>
        Pull.pure(None)
    })

  test("Slow server read", Slow) {
    withServer(Server.listen() { req =>
      val start = now
      // Read at most 3Meg per second
      req.read(
        _.through(rechunk).
          evalMap(c => IO(println(s"${c.size} bytes received"))).
          flatMap(_ => Stream.sleep[IO](3.seconds))
          .compile.drain
      ).map { _ =>
        Ok(s"${now - start}")
      }
    }) { server =>
      val start = now
      var end = 0:Long

      // Send 100M as fast as possible
      val timeToReceive = contentString(
        Post(
          s"http://localhost:${server.port}/",
          content = Content(
            stream = Stream.eval(IO {
              println(s"${`10Meg`} bytes sent")
              end = now
              Chunk.bytes(("." * `10Meg`).getBytes)
            }).repeat.take(10).flatMap(c => Stream.chunk(c))
          )
        ).addHeaders(h"Content-Length" -> h"${10 * `10Meg`}"),
        atMost = 2.minutes
      ).toInt
      val timeToSend = (end - start).toInt

      println(s"Received in ${timeToReceive/1000}s")
      println(s"Sent in ${timeToSend/1000}s")

      timeToReceive should be > 25000
      timeToSend should be > 15000
    }
  }
  test("Client read compressed", Slow) {
    withServer(Server.listen() { req =>
      Ok(Content(Stream.eval(IO {
        println(s"sent ${`10Meg`} bytes")
        Chunk.bytes(("." * `10Meg`).getBytes)
      }).repeat.take(10).flatMap(c => Stream.chunk(c))))
        .addHeaders(h"Content-Length" -> h"${10 * `10Meg`}")
    }) { server =>
      await(atMost = 2.minutes) {
        Client("localhost", server.port).runAndStop { client =>
          for {
            length1 <- client.run(Get("/a"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
            length2 <- client.run(Get("/b").addHeaders(h"Accept-Encoding" -> h"gzip"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
            length3 <- client.run(Get("/c").addHeaders(h"Accept-Encoding" -> h"deflate"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
          } yield {
            length1 shouldEqual 10 * `10Meg`
            length2 shouldEqual length1
            length3 shouldEqual length1
          }
        }
      }
    }
  }

} 
Example 35
Source File: FTracingSpec.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.doobie

import cats.effect.{ContextShift, IO}
import io.opencensus.scala.Tracing
import io.opencensus.scala.http.testSuite.MockTracing
import io.opencensus.trace.{BlankSpan, Status}
import org.scalatest.{OptionValues, Outcome}

import scala.concurrent.ExecutionContext.global
import scala.util.Try
import org.scalatest.flatspec
import org.scalatest.matchers.should.Matchers

class FTracingSpec
    extends flatspec.FixtureAnyFlatSpec
    with Matchers
    with OptionValues {

  implicit val cs: ContextShift[IO] = IO.contextShift(global)

  case class TestInput(fTracing: FTracing[IO], mock: MockTracing)
  override protected def withFixture(test: OneArgTest): Outcome =
    test(clientTracingWithMock())

  override type FixtureParam = TestInput

  behavior of "FTracingSpec"

  it should "start with the correct name" in { f =>
    f.fTracing.traceF(IO(()), "testSpan", None).unsafeRunSync()
    f.mock.startedSpans should have size 1
    f.mock.startedSpans.head.name shouldBe "testSpan"
  }

  it should "trace with parent Span" in { f =>
    val parentSpan = BlankSpan.INSTANCE

    f.fTracing.traceF(IO(()), "testSpan", Some(parentSpan)).unsafeRunSync()
    f.mock.startedSpans should have size 1
    f.mock.startedSpans.head.parentContext.value shouldBe parentSpan.getContext
  }

  it should "stop after normal exit" in { f =>
    f.fTracing.traceF(IO(()), "testSpan", None).unsafeRunSync()
    f.mock.endedSpans should have size 1
    f.mock.endedSpans.head._2.value.getCanonicalCode shouldBe Status.OK.getCanonicalCode
  }

  it should "stop after error" in { f =>
    Try(
      f.fTracing
        .traceF(IO.raiseError(new Exception("TEST")), "testSpan", None)
        .unsafeRunSync()
    )
    f.mock.endedSpans should have size 1
    f.mock.endedSpans.head._2.value.getCanonicalCode shouldBe Status.INTERNAL.getCanonicalCode
  }

  def clientTracingWithMock() = {
    val mockTracing = new MockTracing
    val fTracing = new FTracing[IO] {
      override protected val tracing: Tracing = mockTracing
    }
    TestInput(fTracing, mockTracing)
  }
} 
Example 36
Source File: DoobieCheckSpec.scala    From sup   with Apache License 2.0 5 votes vote down vote up
package sup

import _root_.doobie.Transactor
import cats.effect.Async
import cats.effect.ContextShift
import cats.effect.IO
import cats.effect.Timer
import scala.concurrent.duration._
import cats.implicits._
import scala.concurrent.ExecutionContext

class DoobieCheckSpec extends BaseIOTest {

  def goodTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbc:h2:mem:")

  def badTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbcfoobarnope")

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  "IO H2 check" when {
    "the database responds before the timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = 5.seconds.some)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }

    "there is no timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = none)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }
  }
} 
Example 37
Source File: ServerSpec.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext

import cats.effect.{ IO, Blocker, ContextShift }

import org.scalatest.{ FlatSpec, Matchers, BeforeAndAfterAll }

import fs2.{ Stream, Chunk }

import scodec.bits._
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

class ServerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val ex = Executors.newCachedThreadPool()
  val ec = ExecutionContext.fromExecutor(ex)
  val bl = Blocker.liftExecutionContext(ec)
  val (sg, closeSg) = fs2.io.tcp.SocketGroup[IO](bl).allocated.unsafeRunSync()

  override def afterAll(): Unit = {
    super.afterAll()
    closeSg.unsafeRunSync()
    ex.shutdown()
  }

  "Server" should "respond to a request" in {
    val responses: Vector[Response] = Stream(
      Server.serve(Server.port, sg).drain,
      client(Server.port)
    ).parJoin(Int.MaxValue).take(1L).compile.toVector.unsafeRunSync()
    responses should === (Vector(Ok))
  }

  def client(port: Int): Stream[IO, Response] = {
    Stream.resource(sg.client[IO](Server.addr(port))).flatMap { socket =>
      val bvs: Stream[IO, BitVector] = Stream(Codec[Request].encode(ReSeed(56)).require)
      val bs: Stream[IO, Byte] = bvs.flatMap { bv =>
        Stream.chunk(Chunk.bytes(bv.bytes.toArray))
      }
      val read = bs.through(socket.writes(Server.timeout)).drain.onFinalize(socket.endOfOutput) ++
        socket.reads(Server.bufferSize, Server.timeout).chunks.map(ch => BitVector.view(ch.toArray))
      read.fold(BitVector.empty)(_ ++ _).map(bv => Codec[Response].decode(bv).require.value)
    }
  }
} 
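The spec above allocates its SocketGroup with .allocated.unsafeRunSync() and releases it by hand in afterAll. Where the test framework permits, the same wiring reads more safely as a Resource; a sketch:

import cats.effect.{Blocker, ContextShift, IO, Resource}
import fs2.io.tcp.SocketGroup

import scala.concurrent.ExecutionContext

object SocketGroupResource {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // Blocker[IO] provisions (and releases) a cached pool for blocking calls;
  // SocketGroup runs socket setup on it, shifting back via ContextShift.
  val sockets: Resource[IO, SocketGroup] =
    Blocker[IO].flatMap(blocker => SocketGroup[IO](blocker))
}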
Example 38
Source File: Client.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetAddress, InetSocketAddress }

import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration._

import cats.effect.{ IO, ContextShift }

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.{ ByteString }

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import fs2.interop.reactivestreams._

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Seed, Random }

object Client {

  val reqCodec: StreamEncoder[Request] = streamEncoderFromReified[Request]
  val resCodec: StreamDecoder[Response] = streamDecoderFromReified[Response]

  def main(args: Array[String]): Unit = {
    implicit val sys: ActorSystem = ActorSystem("ClientSystem")
    implicit val mat: Materializer = ActorMaterializer()
    try {
      val resp = Await.result(client(1234), 10.seconds)
      println(resp)
    } finally {
      sys.terminate()
    }
  }

  def client(port: Int)(implicit sys: ActorSystem, mat: Materializer): Future[Vector[Response]] = {
    val addr = new InetSocketAddress(InetAddress.getLoopbackAddress, port)
    Tcp().outgoingConnection(addr).joinMat(logic)(Keep.right).run()
  }

  def logic(implicit sys: ActorSystem): Flow[ByteString, ByteString, Future[Vector[Response]]] = {

    implicit val cs: ContextShift[IO] = IO.contextShift(sys.dispatcher)

    val requests = fs2.Stream(Seed(0xabcdL), Random(1, 100)).covary[IO]
    val source = Source
      .fromPublisher(reqCodec.encode(requests).toUnicastPublisher())
      .map(bv => ByteString.fromArrayUnsafe(bv.toByteArray))

    // TODO: this would be much less ugly if we had a decoder `Flow`
    val buffer = fs2.concurrent.Queue.unbounded[IO, Option[BitVector]].unsafeRunSync()
    val decode: Flow[ByteString, Response, NotUsed] = Flow.fromSinkAndSource(
      Sink.onComplete { _ =>
        buffer.enqueue1(None).unsafeRunSync()
      }.contramap[ByteString] { x =>
        buffer.enqueue1(Some(BitVector.view(x.toArray))).unsafeRunSync()
      },
      Source.fromPublisher(buffer
        .dequeue
        .unNoneTerminate
        .through(decPipe[IO, Response])
        .toUnicastPublisher()
      )
    )
    val sink: Sink[ByteString, Future[Vector[Response]]] = decode.toMat(
      Sink.fold(Vector.empty[Response])(_ :+ _)
    )(Keep.right)

    Flow.fromSinkAndSourceMat(sink, source)(Keep.left)
  }
} 
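Worth noting in `logic`: an Akka ActorSystem's dispatcher is itself an ExecutionContext, so it can back a ContextShift[IO] directly. Reduced to a sketch:

import akka.actor.ActorSystem
import cats.effect.{ContextShift, IO}

object AkkaBackedShift {
  // sys.dispatcher is an ExecutionContext, so IO.contextShift accepts it as-is.
  def contextShiftFor(sys: ActorSystem): ContextShift[IO] =
    IO.contextShift(sys.dispatcher)
}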
Example 39
Source File: BoxStoreTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore.box


import java.util.concurrent.Executors

import blobstore.Path
import cats.effect.{Blocker, ContextShift, IO}
import com.box.sdk.BoxAPIConnection
import org.scalatest.matchers.must.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import scala.concurrent.ExecutionContext

class BoxStoreTest extends AnyFlatSpec with Matchers {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))
  "splitPath" should "correctly split a long path" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("long/path/to/filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("long" :: "path" :: "to" :: Nil)
    key must be("filename")
  }

  it should "split a single element path into a single element list and empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("filename"::Nil)
    key must be("")
  }

  it should "split an empty path into empty list, empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be(""::Nil)
    key must be("")
  }

} 
Example 40
Source File: SftpStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore
package sftp

import java.util.Date

import com.jcraft.jsch._
import cats.instances.option._

import scala.util.Try
import java.io.OutputStream

import cats.Traverse
import cats.effect.{Blocker, ConcurrentEffect, ContextShift, IO, Resource}
import cats.effect.concurrent.{MVar, Semaphore}
import fs2.concurrent.Queue

final class SftpStore[F[_]](
  absRoot: String,
  session: Session,
  blocker: Blocker,
  mVar: MVar[F, ChannelSftp],
  semaphore: Option[Semaphore[F]],
  connectTimeout: Int
)(implicit F: ConcurrentEffect[F], CS: ContextShift[F]) extends Store[F] {
  import implicits._

  import Path.SEP

  private val openChannel: F[ChannelSftp] = {
    val openF = blocker.delay{
      val ch = session.openChannel("sftp").asInstanceOf[ChannelSftp]
      ch.connect(connectTimeout)
      ch
    }
    semaphore.fold(openF){s =>
      F.ifM(s.tryAcquire)(openF, getChannel)
    }
  }

  private val getChannel = F.flatMap(mVar.tryTake) {
    case Some(channel) => F.pure(channel)
    case None => openChannel
  }

  private def channelResource: Resource[F, ChannelSftp] = Resource.make{
    getChannel
  }{
    case ch if ch.isClosed => F.unit
    case ch => F.ifM(mVar.tryPut(ch))(F.unit, SftpStore.closeChannel(semaphore, blocker)(ch))
  }

  // (list/get/put and the other Store[F] operations are elided in this excerpt)
}

object SftpStore {

  def apply[F[_]](
    absRoot: String,
    fa: F[Session],
    blocker: Blocker,
    maxChannels: Option[Long] = None,
    connectTimeout: Int = 10000
  )(implicit F: ConcurrentEffect[F], CS: ContextShift[F]): fs2.Stream[F, SftpStore[F]] =
    if (maxChannels.exists(_ < 1)) {
      fs2.Stream.raiseError[F](new IllegalArgumentException(s"maxChannels must be >= 1"))
    } else {
      for {
        session <- fs2.Stream.bracket(fa)(session => F.delay(session.disconnect()))
        semaphore <- fs2.Stream.eval(Traverse[Option].sequence(maxChannels.map(Semaphore.apply[F])))
        mVar <- fs2.Stream.bracket(MVar.empty[F, ChannelSftp])(mVar => F.flatMap(mVar.tryTake)(_.fold(F.unit)(closeChannel[F](semaphore, blocker))))
      } yield new SftpStore[F](absRoot, session, blocker, mVar, semaphore, connectTimeout)
    }

  private def closeChannel[F[_]](semaphore: Option[Semaphore[F]], blocker: Blocker)(ch: ChannelSftp)(implicit F: ConcurrentEffect[F], CS: ContextShift[F]): F[Unit] =
    F.productR(semaphore.fold(F.unit)(_.release))(blocker.delay(ch.disconnect()))
} 
Example 41
Source File: GcsStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore.gcs

import java.nio.channels.Channels
import java.time.Instant
import java.util.Date

import blobstore.{Path, Store}
import cats.effect.{Blocker, ContextShift, Sync}
import com.google.api.gax.paging.Page
import com.google.cloud.storage.{Acl, Blob, BlobId, BlobInfo, Storage}
import com.google.cloud.storage.Storage.{BlobListOption, CopyRequest}
import fs2.{Chunk, Pipe, Stream}

import scala.jdk.CollectionConverters._

final class GcsStore[F[_]](storage: Storage, blocker: Blocker, acls: List[Acl] = Nil)(implicit F: Sync[F], CS: ContextShift[F]) extends Store[F] {

  private def _chunk(pg: Page[Blob]): Chunk[Path] = {
    val (dirs, files) = pg.getValues.asScala.toSeq.partition(_.isDirectory)
    val dirPaths = Chunk.seq(dirs.map(b => Path(root = b.getBucket, key = b.getName.stripSuffix("/"), size = None, isDir = true, lastModified = None)))
    val filePaths = Chunk.seq(files.map{b =>
      val size = Option(b.getSize: java.lang.Long).map(_.toLong) // Prevent throwing NPE (see https://github.com/scala/bug/issues/9634)
      val lastModified = Option(b.getUpdateTime: java.lang.Long).map(millis => Date.from(Instant.ofEpochMilli(millis))) // Prevent throwing NPE (see https://github.com/scala/bug/issues/9634)
      Path(b.getBucket, key = b.getName, size = size, isDir = false, lastModified = lastModified)
    })
    Chunk.concat(List(dirPaths, filePaths))
  }

  def list(path: Path): fs2.Stream[F, Path] = {
    Stream.unfoldChunkEval[F, () => Option[Page[Blob]], Path]{
      () => Some(storage.list(path.root, BlobListOption.currentDirectory(), BlobListOption.prefix(path.key)))
    }{getPage =>
      blocker.delay{
        getPage().map{pg =>
          if (pg.hasNextPage){
            (_chunk(pg), () => Some(pg.getNextPage))
          } else {
            (_chunk(pg), () => None)
          }
        }
      }
    }
  }

  def get(path: Path, chunkSize: Int): fs2.Stream[F, Byte] = {
    val is = blocker.delay(Channels.newInputStream(storage.get(path.root, path.key).reader()))
    fs2.io.readInputStream(is, chunkSize, blocker, closeAfterUse = true)
  }

  def put(path: Path): Pipe[F, Byte, Unit] = {
    val fos = Sync[F].delay{
      val builder = {
        val b = BlobInfo.newBuilder(path.root, path.key)
        if (acls.nonEmpty) b.setAcl(acls.asJava) else b
      }
      val blobInfo = builder.build()
      val writer = storage.writer(blobInfo)
      Channels.newOutputStream(writer)
    }
    fs2.io.writeOutputStream(fos, blocker, closeAfterUse = true)
  }

  def move(src: Path, dst: Path): F[Unit] = F.productR(copy(src, dst))(remove(src))

  def copy(src: Path, dst: Path): F[Unit] = {
    val req = CopyRequest.newBuilder().setSource(src.root, src.key).setTarget(BlobId.of(dst.root, dst.key)).build()
    F.void(blocker.delay(storage.copy(req).getResult))
  }

  def remove(path: Path): F[Unit] =
    F.void(blocker.delay(storage.delete(path.root, path.key)))
}


object GcsStore{
  def apply[F[_]](
    storage: Storage,
    blocker: Blocker,
    acls: List[Acl]
  )(implicit F: Sync[F], CS: ContextShift[F]): GcsStore[F] = new GcsStore(storage, blocker, acls)
} 
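A hedged sketch of calling this store; the bucket name is hypothetical and the Storage handle is assumed to come from application-default credentials:

import blobstore.Path
import blobstore.gcs.GcsStore
import cats.effect.{Blocker, ContextShift, IO}
import com.google.cloud.storage.{Storage, StorageOptions}

import scala.concurrent.ExecutionContext

object GcsStoreUsage {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val storage: Storage = StorageOptions.getDefaultInstance.getService

  // Every page fetch inside `list` runs on the blocker, not the compute pool.
  def firstListing(blocker: Blocker): IO[List[Path]] =
    GcsStore[IO](storage, blocker, acls = Nil)
      .list(Path("my-bucket/some/prefix")) // hypothetical bucket and prefix
      .compile.toList
}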
Example 42
Source File: FileStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore
package fs

import java.nio.file.{Files, Paths, Path => NioPath}
import java.util.Date

import scala.jdk.CollectionConverters._
import cats.implicits._
import cats.effect.{Blocker, ContextShift, Sync}
import fs2.{Pipe, Stream}

final class FileStore[F[_]](fsroot: NioPath, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F]) extends Store[F] {
  val absRoot: String = fsroot.toAbsolutePath.normalize.toString

  override def list(path: Path): fs2.Stream[F, Path] = {
    val isDir = Stream.eval(F.delay(Files.isDirectory(path)))
    val isFile = Stream.eval(F.delay(Files.exists(path)))

    val files = Stream.eval(F.delay(Files.list(path)))
      .flatMap(x => Stream.fromIterator(x.iterator.asScala))
      .evalMap(x => F.delay(
        Path(x.toAbsolutePath.toString.replaceFirst(absRoot, "")).copy(
          size = Option(Files.size(x)),
          isDir = Files.isDirectory(x),
          lastModified = Option(new Date(Files.getLastModifiedTime(x).toMillis))
        )
      ))

    val file = fs2.Stream.eval {
      F.delay {
        path.copy(
          size = Option(Files.size(path)),
          lastModified = Option(new Date(Files.getLastModifiedTime(path).toMillis))
        )
      }
    }

    isDir.ifM(files, isFile.ifM(file, Stream.empty.covaryAll[F, Path]))
  }

  override def get(path: Path, chunkSize: Int): fs2.Stream[F, Byte] = fs2.io.file.readAll[F](path, blocker, chunkSize)

  override def put(path: Path): Pipe[F, Byte, Unit] = { in =>
    val mkdir = Stream.eval(F.delay(Files.createDirectories(_toNioPath(path).getParent)).as(true))
    mkdir.ifM(
      fs2.io.file.writeAll(path, blocker).apply(in),
      Stream.raiseError[F](new Exception(s"failed to create dir: $path"))
    )
  }

  override def move(src: Path, dst: Path): F[Unit] = F.delay {
    Files.createDirectories(_toNioPath(dst).getParent)
    Files.move(src, dst)
  }.void

  override def copy(src: Path, dst: Path): F[Unit] = {
    F.delay {
      Files.createDirectories(_toNioPath(dst).getParent)
      Files.copy(src, dst)
    }.void
  }

  override def remove(path: Path): F[Unit] = F.delay({
    Files.deleteIfExists(path)
    ()
  })

  implicit private def _toNioPath(path: Path): NioPath =
    Paths.get(absRoot, path.root, path.key)

}

object FileStore{
  def apply[F[_]](fsroot: NioPath, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F]): FileStore[F] = new FileStore(fsroot, blocker)
} 
Example 43
Source File: package.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{ContextShift, Sync, Blocker}
import fs2.{Pipe, Pull, Stream}
import cats.implicits._

package object blobstore {
  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F], CS: ContextShift[F]): Pull[F, Nothing, Unit] = {
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) => Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }
  }

  protected[blobstore] def bufferToDisk[F[_]](chunkSize: Int, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F])
  : Pipe[F, Byte, (Long, Stream[F, Byte])] = {
    in => Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(
      p => F.delay(p.toFile.delete).void).flatMap { p =>
        in.through(fs2.io.file.writeAll(p, blocker)).drain ++
        Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

} 
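Because bufferToDisk is package-private, a usage sketch has to live in package blobstore; it buffers the incoming bytes to a temp file and hands back the byte count plus a re-readable stream:

package blobstore

import cats.effect.{Blocker, ContextShift, IO}
import fs2.Stream

import scala.concurrent.ExecutionContext

object BufferToDiskDemo {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // The temp file backing the buffer is deleted when the bracket closes.
  val byteCount: IO[Long] = Blocker[IO].use { blocker =>
    Stream.emits("hello".getBytes.toList)
      .covary[IO]
      .through(bufferToDisk[IO](4096, blocker))
      .map { case (length, _) => length }
      .compile.lastOrError
  }
}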
Example 44
Source File: IngestionFlowSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import cats.effect.{Concurrent, ContextShift, IO}
import hydra.avro.registry.SchemaRegistry
import hydra.core.ingest.HydraRequest
import hydra.core.ingest.RequestParams.{HYDRA_KAFKA_TOPIC_PARAM,HYDRA_RECORD_KEY_PARAM}
import hydra.ingest.services.IngestionFlow.MissingTopicNameException
import hydra.kafka.algebras.KafkaClientAlgebra
import org.apache.avro.{Schema, SchemaBuilder}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class IngestionFlowSpec extends AnyFlatSpec with Matchers {

  private implicit val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect
  private implicit val mode: scalacache.Mode[IO] = scalacache.CatsEffect.modes.async

  private val testSubject: String = "test_subject"

  private val testSubjectNoKey: String = "test_subject_no_key"

  private val testKey: String = "test"

  private val testPayload: String =
    s"""{"id": "$testKey", "testField": true}"""

  private val testSchema: Schema = SchemaBuilder.record("TestRecord")
    .prop("hydra.key", "id")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()

  private val testSchemaNoKey: Schema = SchemaBuilder.record("TestRecordNoKey")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()

  private def ingest(request: HydraRequest): IO[KafkaClientAlgebra[IO]] = for {
    schemaRegistry <- SchemaRegistry.test[IO]
    _ <- schemaRegistry.registerSchema(testSubject + "-value", testSchema)
    _ <- schemaRegistry.registerSchema(testSubjectNoKey + "-value", testSchemaNoKey)
    kafkaClient <- KafkaClientAlgebra.test[IO]
    ingestFlow <- IO(new IngestionFlow[IO](schemaRegistry, kafkaClient, "https://schemaRegistry.notreal"))
    _ <- ingestFlow.ingest(request)
  } yield kafkaClient

  it should "ingest a message" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(testKey), testPayload)
      }
    }.unsafeRunSync()
  }

  it should "ingest a message with a null key" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubjectNoKey))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubjectNoKey, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (None, testPayload)
      }
    }.unsafeRunSync()
  }

  it should "return an error when no topic name is provided" in {
    val testRequest = HydraRequest("correlationId", testPayload)
    ingest(testRequest).attempt.unsafeRunSync() shouldBe Left(MissingTopicNameException(testRequest))
  }

  it should "take the key from the header if present" in {
    val headerKey = "someDifferentKey"
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_RECORD_KEY_PARAM -> headerKey, HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(headerKey), testPayload)
      }
    }.unsafeRunSync()

  }

} 
Example 45
Source File: KafkaAdminAlgebra.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.algebras

import cats.effect.concurrent.Ref
import cats.effect.{Async, Concurrent, ContextShift, Resource, Sync}
import cats.implicits._
import fs2.kafka._
import hydra.core.protocol._
import hydra.kafka.util.KafkaUtils.TopicDetails
import org.apache.kafka.clients.admin.NewTopic
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException

import scala.util.control.NoStackTrace

trait KafkaAdminAlgebra[F[_]] {
  // Algebra of administrative Kafka operations; interpreters are below.
  def describeTopic(name: KafkaAdminAlgebra.TopicName): F[Option[KafkaAdminAlgebra.Topic]]
  def getTopicNames: F[List[KafkaAdminAlgebra.TopicName]]
  def createTopic(name: KafkaAdminAlgebra.TopicName, details: TopicDetails): F[Unit]
  def deleteTopic(name: String): F[Unit]
}

object KafkaAdminAlgebra {

  type TopicName = String
  final case class Topic(name: TopicName, numberPartitions: Int)

  def live[F[_]: Sync: Concurrent: ContextShift](
      bootstrapServers: String,
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {

      override def describeTopic(name: TopicName): F[Option[Topic]] = {
        getAdminClientResource
          .use(_.describeTopics(name :: Nil))
          .map(_.headOption.map(_._2).map { td =>
            Topic(td.name(), td.partitions().size())
          })
          .recover {
            case _: UnknownTopicOrPartitionException => None
          }
      }

      override def getTopicNames: F[List[TopicName]] =
        getAdminClientResource.use(_.listTopics.names.map(_.toList))

      override def createTopic(name: TopicName, d: TopicDetails): F[Unit] = {
        import scala.collection.JavaConverters._
        val newTopic = new NewTopic(name, d.numPartitions, d.replicationFactor)
          .configs(d.configs.asJava)
        getAdminClientResource.use(_.createTopic(newTopic))
      }

      override def deleteTopic(name: String): F[Unit] =
        getAdminClientResource.use(_.deleteTopic(name))

      private def getAdminClientResource: Resource[F, KafkaAdminClient[F]] = {
        adminClientResource(
          AdminClientSettings.apply.withBootstrapServers(bootstrapServers)
        )
      }
    }
  }

  def test[F[_]: Sync]: F[KafkaAdminAlgebra[F]] =
    Ref[F].of(Map[TopicName, Topic]()).flatMap(getTestKafkaClient[F])

  private[this] def getTestKafkaClient[F[_]: Sync](
      ref: Ref[F, Map[TopicName, Topic]]
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {
      override def describeTopic(name: TopicName): F[Option[Topic]] =
        ref.get.map(_.get(name))

      override def getTopicNames: F[List[TopicName]] =
        ref.get.map(_.keys.toList)

      override def createTopic(
          name: TopicName,
          details: TopicDetails
      ): F[Unit] = {
        val entry = name -> Topic(name, details.numPartitions)
        ref.update(old => old + entry)
      }

      override def deleteTopic(name: String): F[Unit] =
        ref.update(_ - name)
    }
  }

} 
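Because the algebra is interface-first, the in-memory `test` interpreter makes it cheap to exercise calling code without a broker. A small sketch (topic name illustrative):

import cats.effect.IO
import hydra.kafka.algebras.KafkaAdminAlgebra
import hydra.kafka.util.KafkaUtils.TopicDetails

object KafkaAdminDemo {
  // Runs entirely against the Ref-backed interpreter; no Kafka needed.
  val roundTrip: IO[Option[KafkaAdminAlgebra.Topic]] = for {
    client <- KafkaAdminAlgebra.test[IO]
    _      <- client.createTopic("demo-topic", TopicDetails(1, 1))
    topic  <- client.describeTopic("demo-topic")
  } yield topic
}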
Example 46
Source File: KafkaAdminAlgebraSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.algebras

import akka.actor.ActorSystem
import cats.effect.{ContextShift, IO}
import cats.implicits._
import hydra.kafka.util.KafkaUtils.TopicDetails
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

final class KafkaAdminAlgebraSpec
    extends AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with EmbeddedKafka {

  private val port = 8023

  implicit private val kafkaConfig: EmbeddedKafkaConfig =
    EmbeddedKafkaConfig(kafkaPort = port, zooKeeperPort = 3027)

  implicit private val contextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  implicit private val system: ActorSystem = ActorSystem(
    "kafka-client-spec-system"
  )

  override def beforeAll(): Unit = {
    super.beforeAll()
    EmbeddedKafka.start()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    EmbeddedKafka.stop()
  }

  (for {
    live <- KafkaAdminAlgebra
      .live[IO](s"localhost:$port")
    test <- KafkaAdminAlgebra.test[IO]
  } yield {
    runTests(live)
    runTests(test, isTest = true)
  }).unsafeRunSync()

  private def runTests(kafkaClient: KafkaAdminAlgebra[IO], isTest: Boolean = false): Unit = {
    (if (isTest) "KafkaAdmin#test" else "KafkaAdmin#live") must {
      "create a topic" in {
        val topicName = "Topic1"
        val topicDetails = TopicDetails(3, 1.toShort)
        (kafkaClient.createTopic(topicName, topicDetails) *> kafkaClient
          .describeTopic(topicName)
          .map {
            case Some(topic) =>
              topic.name shouldBe topicName
              topic.numberPartitions shouldBe topicDetails.numPartitions
            case None => fail("Found None when a Topic was Expected")
          }).unsafeRunSync()
      }

      "list all topics" in {
        kafkaClient.getTopicNames.unsafeRunSync() shouldBe List("Topic1")
      }

      "delete a topic" in {
        val topicToDelete = "topic_to_delete"
        (for {
          _ <- kafkaClient.createTopic(topicToDelete, TopicDetails(1, 1))
          _ <- kafkaClient.deleteTopic(topicToDelete)
          maybeTopic <- kafkaClient.describeTopic(topicToDelete)
        } yield maybeTopic should not be defined).unsafeRunSync()
      }
    }
  }
} 
Example 47
Source File: MetadataAlgebraSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.algebras

import java.time.Instant

import cats.data.NonEmptyList
import cats.effect.{Concurrent, ContextShift, IO, Sync, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.core.marshallers.History
import hydra.kafka.algebras.MetadataAlgebra.TopicMetadataContainer
import hydra.kafka.model.ContactMethod.Slack
import hydra.kafka.model.TopicMetadataV2Request.Subject
import hydra.kafka.model.{Public, StreamTypeV2, TopicMetadataV2, TopicMetadataV2Key, TopicMetadataV2Request, TopicMetadataV2Value}
import io.chrisdavenport.log4cats.SelfAwareStructuredLogger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.apache.avro.generic.GenericRecord
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import retry.RetryPolicies._
import retry.syntax.all._
import retry.{RetryPolicy, _}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class MetadataAlgebraSpec extends AnyWordSpecLike with Matchers {

  implicit private val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect

  private implicit val policy: RetryPolicy[IO] = limitRetries[IO](5) |+| exponentialBackoff[IO](500.milliseconds)
  private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
  private implicit def noop[A]: (A, RetryDetails) => IO[Unit] = retry.noop[IO, A]

  implicit private def unsafeLogger[F[_]: Sync]: SelfAwareStructuredLogger[F] =
    Slf4jLogger.getLogger[F]

  private implicit class RetryAndAssert[A](boolIO: IO[A]) {
    def retryIfFalse(check: A => Boolean): IO[Assertion] =
      boolIO.map(check).retryingM(identity, policy, noop).map(assert(_))
  }


  private val metadataTopicName = "_internal.metadataTopic"
  private val consumerGroup = "Consumer Group"

  (for {
    kafkaClient <- KafkaClientAlgebra.test[IO]
    schemaRegistry <- SchemaRegistry.test[IO]
    metadata <- MetadataAlgebra.make(metadataTopicName, consumerGroup, kafkaClient, schemaRegistry, consumeMetadataEnabled = true)
  } yield {
    runTests(metadata, kafkaClient)
  }).unsafeRunSync()

  private def runTests(metadataAlgebra: MetadataAlgebra[IO], kafkaClientAlgebra: KafkaClientAlgebra[IO]): Unit = {
    "MetadataAlgebraSpec" should {

      "retrieve none for non-existant topic" in {
        val subject = Subject.createValidated("Non-existantTopic").get
        metadataAlgebra.getMetadataFor(subject).unsafeRunSync() shouldBe None
      }

      "retrieve metadata" in {
        val subject = Subject.createValidated("subject1").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)

        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          metadata <- metadataAlgebra.getMetadataFor(subject)
        } yield metadata shouldBe Some(TopicMetadataContainer(key, value, None, None))).unsafeRunSync()
      }

      "retrieve all metadata" in {
        val subject = Subject.createValidated("subject2").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)
        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          allMetadata <- metadataAlgebra.getAllMetadata
        } yield allMetadata should have length 2).unsafeRunSync()
      }
    }
  }

  private def getMetadataGenericRecords(subject: Subject): (IO[(GenericRecord, Option[GenericRecord])], TopicMetadataV2Key, TopicMetadataV2Value) = {
    val key = TopicMetadataV2Key(subject)
    val value = TopicMetadataV2Value(
        StreamTypeV2.Entity,
        deprecated = false,
        Public,
        NonEmptyList.one(Slack.create("#channel").get),
        Instant.now,
        List(),
        None)
    (TopicMetadataV2.encode[IO](key, Some(value)), key, value)
  }
} 
Example 48
Source File: OutwatchSpec.scala    From outwatch   with Apache License 2.0 5 votes vote down vote up
package outwatch

import scala.concurrent.Future
import cats.effect.ContextShift
import cats.effect.IO
import monix.execution.Ack.Continue
import monix.execution.ExecutionModel.SynchronousExecution
import monix.execution.schedulers.TrampolineScheduler
import monix.execution.{Cancelable, Scheduler}
import monix.reactive.Observable
import org.scalajs.dom.{document, window}
import org.scalatest.BeforeAndAfterEach
import org.scalatest._
import outwatch.Deprecated.IgnoreWarnings.initEvent
import org.scalatest.flatspec.{ AnyFlatSpec, AsyncFlatSpec }
import org.scalatest.matchers.should.Matchers

trait EasySubscribe {

  implicit class Subscriber[T](obs: Observable[T]) {
    def apply(next: T => Unit)(implicit s: Scheduler): Cancelable = obs.subscribe { t =>
      next(t)
      Continue
    }
  }
}

// TODO: We need this mock until localStorage is implemented in jsdom (https://github.com/tmpvar/jsdom/pull/2076)
trait LocalStorageMock {
  import scala.collection.mutable
  import scala.scalajs.js


  if (js.isUndefined(window.localStorage)) {
    val storageObject = new js.Object {
      private val map = new mutable.HashMap[String, String]

      def getItem(key: String): String = map.getOrElse(key, null)

      def setItem(key: String, value: String): Unit = {
        map += key -> value
      }

      def removeItem(key: String): Unit = {
        map -= key
      }

      def clear(): Unit = map.clear()
    }

    js.Dynamic.global.window.updateDynamic("localStorage")(storageObject)
  }

  def dispatchStorageEvent(key: String, newValue: String, oldValue: String): Unit = {
    if (key == null) window.localStorage.clear()
    else window.localStorage.setItem(key, newValue)

    val event = document.createEvent("Events")
    initEvent(event)("storage", canBubbleArg = true, cancelableArg = false)
    event.asInstanceOf[js.Dynamic].key = key
    event.asInstanceOf[js.Dynamic].newValue = newValue
    event.asInstanceOf[js.Dynamic].oldValue = oldValue
    event.asInstanceOf[js.Dynamic].storageArea = window.localStorage
    window.dispatchEvent(event)
    ()
  }
}

trait OutwatchSpec extends Matchers with BeforeAndAfterEach with EasySubscribe with LocalStorageMock { self: Suite =>

  implicit val scheduler: TrampolineScheduler = TrampolineScheduler(Scheduler.global, SynchronousExecution)
  implicit val cs: ContextShift[IO] = IO.contextShift(scheduler)

  override def beforeEach(): Unit = {

    document.body.innerHTML = ""

    window.localStorage.clear()

    // prepare body with <div id="app"></div>
    val root = document.createElement("div")
    root.id = "app"
    document.body.appendChild(root)
    ()
  }

}

abstract class JSDomSpec extends AnyFlatSpec with OutwatchSpec {
  implicit def executionContext = scheduler
}
abstract class JSDomAsyncSpec extends AsyncFlatSpec with OutwatchSpec {
  override def executionContext = scheduler

  implicit def ioAssertionToFutureAssertion(io: IO[Assertion]): Future[Assertion] = io.unsafeToFuture()
} 
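The detail that makes this work on Scala.js: a Monix Scheduler extends ExecutionContext, so the same TrampolineScheduler can drive both Monix observables and ContextShift[IO]. In isolation:

import cats.effect.{ContextShift, IO}
import monix.execution.Scheduler

object SchedulerBackedShift {
  // Scheduler is a subtype of ExecutionContext, so IO.contextShift takes it unchanged.
  def contextShiftOn(scheduler: Scheduler): ContextShift[IO] =
    IO.contextShift(scheduler)
}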
Example 49
Source File: MTLSpecs.scala    From shims   with Apache License 2.0 5 votes vote down vote up
package shims.effect

import cats.effect.{ContextShift, IO}
import cats.effect.laws.discipline.{arbitrary, AsyncTests, ConcurrentEffectTests, ConcurrentTests}, arbitrary._
import cats.effect.laws.util.{TestContext, TestInstances}, TestInstances._

import cats.{Eq, Functor, Monad}
import cats.instances.either._
import cats.instances.int._
import cats.instances.option._
import cats.instances.tuple._
import cats.instances.unit._
import cats.syntax.functor._

import scalaz.{EitherT, Kleisli, OptionT, StateT, WriterT}

import org.scalacheck.{Arbitrary, Prop}

import org.specs2.Specification
import org.specs2.scalacheck.Parameters
import org.specs2.specification.core.Fragments

import org.typelevel.discipline.Laws
import org.typelevel.discipline.specs2.Discipline

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

import java.io.{ByteArrayOutputStream, PrintStream}

object MTLSpecs extends Specification with Discipline {

  def is =
    br ^ checkAllAsync("OptionT[IO, ?]", implicit ctx => ConcurrentTests[OptionT[IO, ?]].concurrent[Int, Int, Int]) ^
    br ^ checkAllAsync("Kleisli[IO, Int, ?]", implicit ctx => ConcurrentTests[Kleisli[IO, Int, ?]].concurrent[Int, Int, Int]) ^
    br ^ checkAllAsync("EitherT[IO, Throwable, ?]", implicit ctx => ConcurrentEffectTests[EitherT[IO, Throwable, ?]].concurrentEffect[Int, Int, Int]) ^
    br ^ checkAllAsync("StateT[IO, Int, ?]", implicit ctx => AsyncTests[StateT[IO, Int, ?]].async[Int, Int, Int]) ^
    br ^ checkAllAsync("WriterT[IO, Int, ?]", implicit ctx => ConcurrentEffectTests[WriterT[IO, Int, ?]].concurrentEffect[Int, Int, Int])

  def checkAllAsync(name: String, f: TestContext => Laws#RuleSet)(implicit p: Parameters) = {
    val context = TestContext()
    val ruleSet = f(context)

    Fragments.foreach(ruleSet.all.properties.toList) {
      case (id, prop) =>
        s"$name.$id" ! check(Prop(p => silenceSystemErr(prop(p))), p, defaultFreqMapPretty) ^ br
    }
  }

  implicit def iocsForEC(implicit ec: ExecutionContext): ContextShift[IO] =
    IO.contextShift(ec)

  implicit def optionTArbitrary[F[_], A](implicit arbFA: Arbitrary[F[Option[A]]]): Arbitrary[OptionT[F, A]] =
    Arbitrary(arbFA.arbitrary.map(OptionT.optionT(_)))

  implicit def kleisliArbitrary[F[_], R, A](implicit arbRFA: Arbitrary[R => F[A]]): Arbitrary[Kleisli[F, R, A]] =
    Arbitrary(arbRFA.arbitrary.map(Kleisli(_)))

  implicit def eitherTArbitrary[F[_]: Functor, L, A](implicit arbEA: Arbitrary[F[Either[L, A]]]): Arbitrary[EitherT[F, L, A]] =
    Arbitrary(arbEA.arbitrary.map(fe => EitherT.eitherT(fe.map(_.asScalaz))))

  implicit def stateTArbitrary[F[_]: Monad, S, A](implicit arbSFA: Arbitrary[S => F[(S, A)]]): Arbitrary[StateT[F, S, A]] =
    Arbitrary(arbSFA.arbitrary.map(StateT(_)))

  implicit def writerTArbitrary[F[_], L, A](implicit arbFLA: Arbitrary[F[(L, A)]]): Arbitrary[WriterT[F, L, A]] =
    Arbitrary(arbFLA.arbitrary.map(WriterT(_)))

  implicit def kleisliEq[F[_], A](implicit eqv: Eq[F[A]]): Eq[Kleisli[F, Int, A]] =
    Eq.by(_(42))   // totally random and comprehensive seed

  implicit def stateTEq[F[_]: Monad, S, A](implicit eqv: Eq[F[(Int, A)]]): Eq[StateT[F, Int, A]] =
    Eq.by(_.run(42))   // totally random and comprehensive seed

  // copied from cats-effect
  private def silenceSystemErr[A](thunk: => A): A = synchronized {
    // Silencing System.err
    val oldErr = System.err
    val outStream = new ByteArrayOutputStream()
    val fakeErr = new PrintStream(outStream)
    System.setErr(fakeErr)
    try {
      val result = thunk
      System.setErr(oldErr)
      result
    } catch {
      case NonFatal(e) =>
        System.setErr(oldErr)
        // In case of errors, print whatever was caught
        fakeErr.close()
        val out = outStream.toString("utf-8")
        if (out.nonEmpty) oldErr.println(out)
        throw e
    }
  }
} 
Example 50
Source File: AjaxClient.scala    From canoe   with MIT License 5 votes vote down vote up
package canoe.api.clients

import canoe.api.{FailedMethod, ResponseDecodingError, TelegramClient}
import canoe.methods.Method
import canoe.models.Response
import cats.effect.{Async, ContextShift}
import cats.syntax.all._
import io.circe.Decoder
import io.circe.parser.decode
import org.scalajs.dom.console
import org.scalajs.dom.ext.Ajax

private[api] class AjaxClient[F[_]: Async: ContextShift](token: String) extends TelegramClient[F] {

  private val botApiUri: String = s"https://api.telegram.org/bot$token"

  
  def execute[Req, Res](request: Req)(implicit M: Method[Req, Res]): F[Res] = {
    implicit val responseDecoder: Decoder[Response[Res]] = Response.decoder[Res](M.decoder)

    sendJsonRequest(request, M).map(decode[Response[Res]]).flatMap {
      case Left(error)     => handleUnknownEntity(M.name, request, error.getMessage)
      case Right(response) => handleTelegramResponse(M, request)(response)
    }
  }

  private def handleUnknownEntity[I, A](method: String, input: I, error: String): F[A] = {
    console.error(
      s"Received unknown Telegram entity during execution of '$method' method. \nInput data: $input. \n${error}"
    )
    ResponseDecodingError(error.toString).raiseError[F, A]
  }

  private def handleTelegramResponse[A, I, C](m: Method[I, A], input: I)(response: Response[A]): F[A] =
    response match {
      case Response(true, Some(result), _, _, _) => result.pure[F]

      case failed =>
        console.error(s"Received failed response from Telegram: $failed. Method name: ${m.name}, input data: $input")
        FailedMethod(m, input, failed).raiseError[F, A]
    }

  private def sendJsonRequest[Req, Res](request: Req, method: Method[Req, Res]): F[String] = {
    val url = s"$botApiUri/${method.name}"
    val json = method.encoder.apply(request).toString

    Async
      .fromFuture(Async[F].delay(Ajax.post(url, json, headers = Map("Content-Type" -> "application/json"))))
      .map(_.responseText)
  }
} 
Example 51
Source File: Http4sClientEndpointsJsonSchemaTest.scala    From endpoints4s   with MIT License
package endpoints4s.http4s.client

import endpoints4s.algebra
import endpoints4s.algebra.client

import cats.effect.Sync
import org.http4s.client.Client
import cats.effect.IO
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.global
import org.http4s.client.asynchttpclient.AsyncHttpClient
import cats.effect.ContextShift
import endpoints4s.algebra.circe
import org.http4s.Uri

class TestJsonSchemaClient[F[_]: Sync](host: Uri, client: Client[F])
    extends Endpoints[F](host, client)
    with BasicAuthentication
    with JsonEntitiesFromCodecs
    with algebra.BasicAuthenticationTestApi
    with algebra.EndpointsTestApi
    with algebra.JsonFromCodecTestApi
    with algebra.SumTypedEntitiesTestApi
    with circe.JsonFromCirceCodecTestApi
    with circe.JsonEntitiesFromCodecs

class Http4sClientEndpointsJsonSchemaTest
    extends client.EndpointsTestSuite[TestJsonSchemaClient[IO]]
    with client.BasicAuthTestSuite[TestJsonSchemaClient[IO]]
    with client.JsonFromCodecTestSuite[TestJsonSchemaClient[IO]]
    with client.SumTypedEntitiesTestSuite[TestJsonSchemaClient[IO]] {

  implicit val ctx: ContextShift[IO] = IO.contextShift(global)

  val (ahc, shutdown) =
    AsyncHttpClient.allocate[IO]().unsafeRunSync()

  val client = new TestJsonSchemaClient[IO](
    Uri.unsafeFromString(s"http://localhost:$wiremockPort"),
    ahc
  )

  def call[Req, Resp](
      endpoint: client.Endpoint[Req, Resp],
      args: Req
  ): Future[Resp] = {
    Thread.sleep(50)
    val eventualResponse = endpoint(args)
    Thread.sleep(50)
    eventualResponse.unsafeToFuture()
  }

  def encodeUrl[A](url: client.Url[A])(a: A): String =
    url.encodeUrl(a).toOption.get.renderString

  clientTestSuite()
  basicAuthSuite()
  jsonFromCodecTestSuite()

  override def afterAll(): Unit = {
    shutdown.unsafeRunSync()
    super.afterAll()
  }

} 
Example 52
Source File: ServerInterpreterTest.scala    From endpoints4s   with MIT License
package endpoints4s.http4s.server

import java.net.ServerSocket

import cats.effect.{ContextShift, IO, Timer}
import endpoints4s.{Invalid, Valid}
import endpoints4s.algebra.server.{
  BasicAuthenticationTestSuite,
  DecodedUrl,
  EndpointsTestSuite,
  JsonEntitiesFromSchemasTestSuite,
  SumTypedEntitiesTestSuite,
  TextEntitiesTestSuite
}
import org.http4s.server.Router
import org.http4s.{HttpRoutes, Uri}
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.syntax.kleisli._

import scala.concurrent.ExecutionContext

class ServerInterpreterTest
    extends EndpointsTestSuite[EndpointsTestApi]
    with BasicAuthenticationTestSuite[EndpointsTestApi]
    with JsonEntitiesFromSchemasTestSuite[EndpointsTestApi]
    with TextEntitiesTestSuite[EndpointsTestApi]
    with SumTypedEntitiesTestSuite[EndpointsTestApi] {

  val serverApi = new EndpointsTestApi()

  def decodeUrl[A](url: serverApi.Url[A])(rawValue: String): DecodedUrl[A] = {
    val uri =
      Uri.fromString(rawValue).getOrElse(sys.error(s"Illegal URI: $rawValue"))

    url.decodeUrl(uri) match {
      case None                  => DecodedUrl.NotMatched
      case Some(Invalid(errors)) => DecodedUrl.Malformed(errors)
      case Some(Valid(a))        => DecodedUrl.Matched(a)
    }
  }

  private def serveGeneralEndpoint[Req, Resp](
      endpoint: serverApi.Endpoint[Req, Resp],
      request2response: Req => Resp
  )(runTests: Int => Unit): Unit = {
    val port = {
      val socket = new ServerSocket(0)
      try socket.getLocalPort
      finally if (socket != null) socket.close()
    }
    implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
    implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
    val service = HttpRoutes.of[IO](endpoint.implementedBy(request2response))
    val httpApp = Router("/" -> service).orNotFound
    val server =
      BlazeServerBuilder[IO](ExecutionContext.global)
        .bindHttp(port, "localhost")
        .withHttpApp(httpApp)
    server.resource.use(_ => IO(runTests(port))).unsafeRunSync()
  }

  def serveEndpoint[Resp](
      endpoint: serverApi.Endpoint[_, Resp],
      response: => Resp
  )(runTests: Int => Unit): Unit =
    serveGeneralEndpoint(endpoint, (_: Any) => response)(runTests)

  def serveIdentityEndpoint[Resp](
      endpoint: serverApi.Endpoint[Resp, Resp]
  )(runTests: Int => Unit): Unit =
    serveGeneralEndpoint(endpoint, identity[Resp])(runTests)
} 
Example 53
Source File: RerunnableContextShift.scala    From catbird   with Apache License 2.0
package io.catbird.util.effect

import cats.effect.ContextShift
import com.twitter.util.{ Future, FuturePool, Promise }
import io.catbird.util.Rerunnable

import scala.Unit
import java.lang.Runnable
import java.util.concurrent.ExecutorService

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutorService }


object RerunnableContextShift {

  final def fromExecutionContext(ec: ExecutionContext): ContextShift[Rerunnable] =
    new RerunnableContextShift(ec)

  final def global: ContextShift[Rerunnable] =
    fromExecutionContext(ExecutionContext.global)

  object Implicits {
    final implicit def global: ContextShift[Rerunnable] = RerunnableContextShift.global
  }
}

final private[effect] class RerunnableContextShift private (ec: ExecutionContext) extends ContextShift[Rerunnable] {
  private final lazy val futurePool = FuturePool.interruptible(ec.asInstanceOf[ExecutionContextExecutorService])

  override def shift: Rerunnable[Unit] =
    Rerunnable.withFuturePool(futurePool)(()) // This is a bit of a hack, but it will have to do

  override def evalOn[A](targetEc: ExecutionContext)(fa: Rerunnable[A]): Rerunnable[A] =
    for {
      r <- executeOn(targetEc)(fa).liftToTry
      _ <- shift
      a <- Rerunnable.fromFuture(Future.value(r).lowerFromTry)
    } yield a

  private def executeOn[A](targetEc: ExecutionContext)(fa: Rerunnable[A]): Rerunnable[A] =
    Rerunnable.fromFuture {
      val p = Promise[A]()

      targetEc.execute(new Runnable {
        override def run(): Unit =
          fa.run.proxyTo[A](p)
      })

      p
    }
} 
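A hedged usage sketch of the class above (pool sizes and the doWork thunk are illustrative assumptions): build an instance from a dedicated pool, then evaluate a single Rerunnable on a blocking pool and shift back.

import cats.effect.ContextShift
import io.catbird.util.Rerunnable
import io.catbird.util.effect.RerunnableContextShift
import java.util.concurrent.Executors
import scala.concurrent.ExecutionContext

object RerunnableContextShiftSketch {
  private val ioPool = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(2))
  private val blockingPool = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool())

  implicit val cs: ContextShift[Rerunnable] =
    RerunnableContextShift.fromExecutionContext(ioPool)

  // Runs doWork on blockingPool; the continuation resumes on ioPool.
  def onBlocking[A](doWork: => A): Rerunnable[A] =
    cs.evalOn(blockingPool)(Rerunnable(doWork))
}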
Example 54
Source File: RerunnableContextShiftSuite.scala    From catbird   with Apache License 2.0
package io.catbird.util.effect

import cats.effect.{ ContextShift, IO, Sync }
import com.twitter.util.{ Await, Future, FuturePool }
import io.catbird.util.Rerunnable
import org.scalatest.Outcome
import org.scalatest.funsuite.FixtureAnyFunSuite

class RerunnableContextShiftSuite extends FixtureAnyFunSuite with ThreadPoolNamingSupport {

  protected final class FixtureParam {
    val futurePoolName = "future-pool"
    val otherPoolName = "other-pool"
    val ioPoolName = "io-pool"

    val futurePool = FuturePool.interruptible(newNamedThreadPool(futurePoolName))
    val otherPool = newNamedThreadPool(otherPoolName)
    val ioPool = newNamedThreadPool(ioPoolName)

    def newIO: IO[String] = IO(currentThreadName())

    def newFuture: Future[String] = futurePool(currentThreadName())

    def newRerunnable: Rerunnable[String] = Rerunnable(currentThreadName())
  }

  test("ContextShift[Rerunnable].shift shifts to the pool of the instance") { f =>
    implicit val cs: ContextShift[Rerunnable] =
      RerunnableContextShift.fromExecutionContext(f.ioPool)

    val (poolName1, poolName2, poolName3) =
      (for {
        poolName1 <- Rerunnable.fromFuture(f.newFuture)

        _ <- ContextShift[Rerunnable](cs).shift

        poolName2 <- Sync[Rerunnable].delay(currentThreadName())

        poolName3 <- Rerunnable.fromFuture(f.newFuture)
      } yield (poolName1, poolName2, poolName3)).run.await

    assert(poolName1 == f.futurePoolName)
    assert(poolName2 == f.ioPoolName)
    assert(poolName3 == f.futurePoolName) // newFuture runs on the future pool
  }

  test("ContextShift[Rerunnable].evalOn executes on correct pool and shifts back to previous pool") { f =>
    implicit val cs: ContextShift[Rerunnable] =
      RerunnableContextShift.fromExecutionContext(f.ioPool)

    val (poolName1, poolName2, poolName3) =
      (for {
        poolName1 <- f.newRerunnable

        poolName2 <- ContextShift[Rerunnable].evalOn(f.otherPool)(f.newRerunnable)

        poolName3 <- f.newRerunnable
      } yield (poolName1, poolName2, poolName3)).run.await

    assert(poolName1 == currentThreadName()) // The first rerunnable is not explicitly evaluated on a dedicated pool
    assert(poolName2 == f.otherPoolName)
    assert(poolName3 == f.ioPoolName)
  }

  test("ContextShift[Rerunnable].evalOn executes on correct pool and shifts back to future pool") { f =>
    implicit val cs: ContextShift[Rerunnable] =
      RerunnableContextShift.fromExecutionContext(f.ioPool)

    val (poolName1, poolName2, poolName3) =
      (for {
        poolName1 <- Rerunnable.fromFuture(f.newFuture) // The future was started on a dedicated pool (e.g. netty)

        poolName2 <- ContextShift[Rerunnable].evalOn(f.otherPool)(f.newRerunnable)

        poolName3 <- Rerunnable.fromFuture(f.newFuture)
      } yield (poolName1, poolName2, poolName3)).run.await

    assert(poolName1 == f.futurePoolName)
    assert(poolName2 == f.otherPoolName)
    assert(poolName3 == f.futurePoolName)
  }

  implicit private class FutureAwaitOps[A](future: Future[A]) {
    def await: A = Await.result(future)
  }

  override protected def withFixture(test: OneArgTest): Outcome = withFixture(test.toNoArgTest(new FixtureParam))
} 
Example 55
Source File: ContextShiftingSuite.scala    From catbird   with Apache License 2.0
package io.catbird.util.effect

import cats.effect.{ ContextShift, IO }
import com.twitter.util.{ ExecutorServiceFuturePool, Future, FuturePool }
import org.scalatest.Outcome
import org.scalatest.funsuite.FixtureAnyFunSuite

import scala.concurrent.ExecutionContext

class ContextShiftingSuite extends FixtureAnyFunSuite with ThreadPoolNamingSupport {

  protected final class FixtureParam {
    val ioPoolName = "io-pool"
    val futurePoolName = "future-pool"

    val ioPool = newNamedThreadPool(ioPoolName)

    val futurePool: ExecutorServiceFuturePool = // threadpool of Future (often managed by a library like finagle-http)
      FuturePool(newNamedThreadPool(futurePoolName))

    def newIO: IO[String] = IO(currentThreadName())

    def newFuture: Future[String] = futurePool.apply {
      // Not 100% sure why but this sleep is needed to reproduce the error. There might be an optimization if the
      // Future is already resolved at some point
      Thread.sleep(200)
      currentThreadName()
    }
  }

  test("After resolving the Future with futureToAsync stay on the Future threadpool") { f =>
    implicit val contextShift: ContextShift[IO] = // threadpool of IO (often provided by IOApp)
      IO.contextShift(ExecutionContext.fromExecutor(f.ioPool))

    val (futurePoolName, ioPoolName) = (for {
      futurePoolName <- futureToAsync[IO, String](f.newFuture)

      ioPoolName <- f.newIO
    } yield (futurePoolName, ioPoolName)).start(contextShift).flatMap(_.join).unsafeRunSync()

    assert(futurePoolName == f.futurePoolName)
    assert(ioPoolName == f.futurePoolName) // Uh oh, this is likely not what the user wants
  }

  test("After resolving the Future with futureToAsyncAndShift shift back to the threadpool of ContextShift[F]") { f =>
    implicit val contextShift: ContextShift[IO] = // threadpool of IO (often provided by IOApp)
      IO.contextShift(ExecutionContext.fromExecutor(f.ioPool))

    // If you'd use `futureToAsync` here instead, this whole thing would sometimes stay on the future-pool
    val (futurePoolName, ioPoolName) = (for {
      futurePoolName <- futureToAsyncAndShift[IO, String](f.newFuture)

      ioPoolName <- f.newIO
    } yield (futurePoolName, ioPoolName))
      .start(contextShift) // start the computation on the default threadpool...
      .flatMap(_.join) // ...then block until we have the results
      .unsafeRunSync()

    assert(futurePoolName == f.futurePoolName)
    assert(ioPoolName == f.ioPoolName)
  }

  override protected def withFixture(test: OneArgTest): Outcome = withFixture(test.toNoArgTest(new FixtureParam))
} 
Example 56
Source File: package.scala    From fs2-blobstore   with Apache License 2.0
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{Blocker, Concurrent, ContextShift, Resource, Sync}
import fs2.{Chunk, Hotswap, Pipe, Pull, RaiseThrowable, Stream}
import cats.implicits._

package object blobstore {
  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F],
    CS: ContextShift[F]
  ): Pull[F, Nothing, Unit] = {
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) =>
        Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }
  }

  protected[blobstore] def bufferToDisk[F[_]](
    chunkSize: Int,
    blocker: Blocker
  )(implicit F: Sync[F], CS: ContextShift[F]): Pipe[F, Byte, (Long, Stream[F, Byte])] = { in =>
    Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(p => F.delay(p.toFile.delete).void).flatMap {
      p =>
        in.through(fs2.io.file.writeAll(p, blocker)).drain ++
          Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

  private[blobstore] def putRotateBase[F[_]: Concurrent, T](
    limit: Long,
    openNewFile: Resource[F, T]
  )(consume: T => Chunk[Byte] => F[Unit]): Pipe[F, Byte, Unit] = { in =>
    Stream
      .resource(Hotswap(openNewFile))
      .flatMap {
        case (hotswap, newFile) =>
          goRotate(limit, 0L, in, newFile, hotswap, openNewFile)(
            consume = consumer => bytes => Pull.eval(consume(consumer)(bytes)).as(consumer),
            extract = Stream.emit
          ).stream
      }
  }

  private[blobstore] def goRotate[F[_]: RaiseThrowable, A, B](
    limit: Long,
    acc: Long,
    s: Stream[F, Byte],
    consumer: B,
    hotswap: Hotswap[F, A],
    resource: Resource[F, A]
  )(
    consume: B => Chunk[Byte] => Pull[F, Unit, B],
    extract: A => Stream[F, B]
  ): Pull[F, Unit, Unit] = {
    val toWrite = (limit - acc).min(Int.MaxValue.toLong).toInt
    s.pull.unconsLimit(toWrite).flatMap {
      case Some((hd, tl)) =>
        val newAcc = acc + hd.size
        consume(consumer)(hd).flatMap { consumer =>
          if (newAcc >= limit) {
            Pull
              .eval(hotswap.swap(resource))
              .flatMap(a => extract(a).pull.headOrError)
              .flatMap(nc => goRotate(limit, 0L, tl, nc, hotswap, resource)(consume, extract))
          } else {
            goRotate(limit, newAcc, tl, consumer, hotswap, resource)(consume, extract)
          }
        }
      case None => Pull.done
    }
  }

} 
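Since bufferToDisk is package-private, here is a hedged, self-contained sketch of the same buffer-to-temp-file pattern written against public fs2 2.x APIs only (all names are illustrative):

import java.nio.file.Files

import cats.effect.{Blocker, ContextShift, IO}
import cats.implicits._
import fs2.Stream

import scala.concurrent.ExecutionContext

object BufferToDiskSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // Write the input to a temp file, then emit its total length together with
  // a stream that re-reads the file; the bracket deletes the file afterwards.
  def buffered(in: Stream[IO, Byte], blocker: Blocker, chunkSize: Int): Stream[IO, (Long, Stream[IO, Byte])] =
    Stream.bracket(IO(Files.createTempFile("buffered", ".bin")))(p => IO(p.toFile.delete).void).flatMap { p =>
      in.through(fs2.io.file.writeAll(p, blocker)).drain ++
        Stream.emit((p.toFile.length, fs2.io.file.readAll[IO](p, blocker, chunkSize)))
    }
}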
Example 57
Source File: AkkaInterpExampleMain.scala    From hammock   with MIT License
package example.interpret
import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpExt}
import akka.stream.ActorMaterializer
import cats.effect.{ContextShift, IO}
import example.repr.{GetResp, GetRespWithQueryString, Req, Resp}
import hammock.{Hammock, Method}
import hammock.marshalling._
import hammock.circe.implicits._
import io.circe.generic.auto._
import hammock.akka.AkkaInterpreter._
import scala.concurrent.ExecutionContext

object AkkaInterpExampleMain extends App {

  implicit val actorSystem: ActorSystem        = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec: ExecutionContext            = ExecutionContext.Implicits.global
  implicit val cs: ContextShift[IO]            = IO.contextShift(ec)
  implicit val client: HttpExt                 = Http(actorSystem)

  //GET
  val getResp = Hammock
    .request(Method.GET, getUri, Map())
    .as[GetResp]
    .exec[IO]
    .unsafeRunSync

  println(s"GET::Response = $getResp")

  //GET with query string
  val getRespWithQueryString = Hammock
    .request(Method.GET, getUriWithQueryString, Map())
    .as[GetRespWithQueryString]
    .exec[IO]
    .unsafeRunSync

  println(s"GET with query string::Response = $getRespWithQueryString")

  //POST
  val postResp = Hammock
    .request(Method.POST, postUri, Map(), Some(Req("name", 4)))
    .as[Resp]
    .exec[IO]
    .unsafeRunSync

  println(s"POST::Response = $postResp")

  //PUT
  val putResp = Hammock
    .request(Method.PUT, putUri, Map(), Some(Req("name", 4)))
    .as[Resp]
    .exec[IO]
    .unsafeRunSync

  println(s"PUT::Response = $putResp")

  //DELETE
  val deleteResp = Hammock
    .request(Method.DELETE, deleteUri, Map(), Some(Req("name", 4)))
    .exec[IO]
    .unsafeRunSync

  println(s"DELETE::Response = $deleteResp")

  actorSystem.terminate()
} 
Example 58
Source File: Interpreter.scala    From hammock   with MIT License
package hammock
package js

import cats._
import cats.effect.{Async, ContextShift}
import cats.syntax.applicative._
import cats.syntax.flatMap._
import cats.syntax.functor._
import cats.syntax.show._
import org.scalajs.dom.ext.Ajax
import org.scalajs.dom.ext.Ajax.InputData
import java.nio.ByteBuffer

object Interpreter {

  def apply[F[_]](implicit F: InterpTrans[F]): InterpTrans[F] = F

  implicit def instance[F[_]: Async: ContextShift]: InterpTrans[F] = new InterpTrans[F] {
    def trans: HttpF ~> F = {

      def doReq(reqF: HttpF[HttpResponse]): F[HttpResponse] = {
        val timeout = 0
        val headers = reqF.req.headers
        val data: InputData = reqF.req.entity.fold(InputData.str2ajax(""))(
          _.cata(
            string => InputData.str2ajax(string.content),
            bytes => InputData.byteBuffer2ajax(ByteBuffer.wrap(bytes.content)),
            Function.const(InputData.str2ajax("")))
        )
        val method = toMethod(reqF)

        for {
          response <- Async.fromFuture(
            Async[F].delay(Ajax(method.name, reqF.req.uri.show, data, timeout, headers, false, "")))
          responseHeaders <- parseHeaders(response.getAllResponseHeaders)
          status = Status.get(response.status)
          body   = response.responseText
        } yield HttpResponse(status, responseHeaders, Entity.StringEntity(body))
      }

      def toMethod(reqF: HttpF[HttpResponse]): Method = reqF match {
        case Options(_) => Method.OPTIONS
        case Get(_)     => Method.GET
        case Head(_)    => Method.HEAD
        case Post(_)    => Method.POST
        case Put(_)     => Method.PUT
        case Delete(_)  => Method.DELETE
        case Trace(_)   => Method.TRACE
        case Patch(_)   => Method.PATCH
      }

      def parseHeaders(str: String): F[Map[String, String]] = str match {
        case null => Map.empty[String, String].pure[F]
        case string =>
          Async[F].delay(
            string
              .split("\r\n")
              .map({ line =>
                val splitted = line.split(": ")
                (splitted.head, splitted.tail.mkString("").trim)
              })
              .toMap)
      }

      λ[HttpF ~> F] {
        case req @ (Options(_) | Get(_) | Head(_) | Post(_) | Put(_) | Delete(_) | Trace(_) | Patch(_)) => doReq(req)
      }
    }
  }
} 
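A hedged usage sketch of the interpreter above (the target URL is an illustrative assumption): with a ContextShift[IO] in scope, the implicit instance lets Hammock request programs execute on Scala.js.

import cats.effect.{ContextShift, IO}
import hammock._
import hammock.js.Interpreter
import scala.concurrent.ExecutionContext

object JsInterpreterSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val interpreter: InterpTrans[IO] = Interpreter.instance

  // Build a GET request and interpret it into IO via the Ajax-backed interpreter.
  val response: IO[HttpResponse] =
    Hammock.request(Method.GET, uri"https://example.com/ping", Map()).exec[IO]
}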
Example 59
Source File: ProcessAlg.scala    From scala-steward   with Apache License 2.0
package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, Concurrent, ContextShift, Timer}
import cats.implicits._
import io.chrisdavenport.log4cats.Logger
import org.scalasteward.core.application.Cli.EnvVar
import org.scalasteward.core.application.Config
import org.scalasteward.core.util.Nel

trait ProcessAlg[F[_]] {
  def exec(command: Nel[String], cwd: File, extraEnv: (String, String)*): F[List[String]]

  def execSandboxed(command: Nel[String], cwd: File): F[List[String]]
}

object ProcessAlg {
  abstract class UsingFirejail[F[_]](config: Config) extends ProcessAlg[F] {
    override def execSandboxed(command: Nel[String], cwd: File): F[List[String]] = {
      val envVars = config.envVars.map(EnvVar.unapply(_).get)
      if (config.disableSandbox)
        exec(command, cwd, envVars: _*)
      else {
        val whitelisted = (cwd.pathAsString :: config.whitelistedDirectories)
          .map(dir => s"--whitelist=$dir")
        val readOnly = config.readOnlyDirectories
          .map(dir => s"--read-only=$dir")
        exec(Nel("firejail", whitelisted ++ readOnly) ::: command, cwd, envVars: _*)
      }
    }
  }

  def create[F[_]](blocker: Blocker)(implicit
      config: Config,
      contextShift: ContextShift[F],
      logger: Logger[F],
      timer: Timer[F],
      F: Concurrent[F]
  ): ProcessAlg[F] =
    new UsingFirejail[F](config) {
      override def exec(
          command: Nel[String],
          cwd: File,
          extraEnv: (String, String)*
      ): F[List[String]] =
        logger.debug(s"Execute ${command.mkString_(" ")}") >>
          process.slurp[F](
            command,
            Some(cwd.toJava),
            extraEnv.toMap,
            config.processTimeout,
            logger.trace(_),
            blocker
          )
    }
} 
Example 60
Source File: TestInstances.scala    From scala-steward   with Apache License 2.0
package org.scalasteward.core

import _root_.io.chrisdavenport.log4cats.Logger
import _root_.io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import cats.effect.{ContextShift, IO, Timer}
import org.scalacheck.{Arbitrary, Cogen, Gen}
import org.scalasteward.core.data.Version
import org.scalasteward.core.util.Change
import org.scalasteward.core.util.Change.{Changed, Unchanged}
import scala.concurrent.ExecutionContext

object TestInstances {
  implicit def changeArbitrary[T](implicit arbT: Arbitrary[T]): Arbitrary[Change[T]] =
    Arbitrary(arbT.arbitrary.flatMap(t => Gen.oneOf(Changed(t), Unchanged(t))))

  implicit val ioContextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  implicit val ioLogger: Logger[IO] =
    Slf4jLogger.getLogger[IO]

  implicit val ioTimer: Timer[IO] =
    IO.timer(ExecutionContext.global)

  implicit val versionArbitrary: Arbitrary[Version] = {
    val versionChar = Gen.frequency(
      (8, Gen.numChar),
      (5, Gen.const('.')),
      (3, Gen.alphaChar),
      (2, Gen.const('-'))
    )
    Arbitrary(Gen.listOf(versionChar).map(_.mkString).map(Version.apply))
  }

  implicit val versionCogen: Cogen[Version] =
    Cogen(_.alnumComponents.map {
      case Version.Component.Numeric(value) => BigInt(value).toLong
      case a @ Version.Component.Alpha(_)   => a.order.toLong
      case _                                => 0L
    }.sum)
} 
Example 61
Source File: IOSuite.scala    From kafka-journal   with MIT License
package com.evolutiongaming.kafka.journal

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 5.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallel: Parallel[IO]               = IO.ioParallel
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock(Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
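A hedged sketch of a test built on these helpers (the suite and test names are illustrative): the imported Timer runs the sleep, and the IOOps syntax turns the IO into the Future[Assertion] that scalatest expects.

import cats.effect.IO
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.duration._

class IOSuiteUsageSpec extends AsyncFunSuite {
  test("completes within the default timeout") {
    IO.sleep(10.millis).run() // fails the test if it exceeds IOSuite.Timeout
  }
}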
Example 62
Source File: Fs2SubscriptionStream.scala    From sangria   with Apache License 2.0
package sangria.util

import cats.effect.{ContextShift, IO}
import fs2.Stream
import sangria.streaming.SubscriptionStream
import scala.concurrent.Future
import scala.language.higherKinds

object Fs2Support {
  type IOS[A] = Stream[IO, A]

  class Fs2SubscriptionStream(implicit CS: ContextShift[IO]) extends SubscriptionStream[IOS] {
    def supported[T[_]](other: SubscriptionStream[T]) = other.isInstanceOf[Fs2SubscriptionStream]

    def map[A, B](source: IOS[A])(fn: A => B) = source.map(fn)

    def singleFuture[T](value: Future[T]) =
      Stream.eval(IO.fromFuture(IO(value)))

    def single[T](value: T) = Stream.emit(value)

    def mapFuture[A, B](source: IOS[A])(fn: A => Future[B]) =
      source.evalMap(a => IO.fromFuture(IO(fn(a))))

    def first[T](s: IOS[T]) =
      s.compile.toVector.map(_.head).unsafeToFuture

    def failed[T](e: Throwable) = Stream.raiseError[IO](e)

    def onComplete[Ctx, Res](result: IOS[Res])(op: => Unit) =
      result.onFinalize(IO(op))

    def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => IOS[Res]) =
      Stream.eval(IO.fromFuture(IO(future))).flatMap(resultFn)

    def merge[T](streams: Vector[IOS[T]]) =
      if (streams.nonEmpty)
        streams.tail.foldLeft(streams.head)(_.merge(_))
      else
        throw new IllegalStateException("No streams produced!")

    def recover[T](stream: IOS[T])(fn: Throwable => T) =
      stream.handleErrorWith { case e => Stream.emit(fn(e)) }
  }

  implicit def observableSubscriptionStream(implicit CS: ContextShift[IO]): SubscriptionStream[IOS] =
    new Fs2SubscriptionStream
} 
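A hedged sketch exercising the instance above outside of sangria's executor (the values are illustrative): with a ContextShift[IO] in scope, the implicit SubscriptionStream[IOS] can merge plain fs2 streams.

import cats.effect.{ContextShift, IO}
import fs2.Stream
import sangria.streaming.SubscriptionStream
import sangria.util.Fs2Support._

import scala.concurrent.ExecutionContext

object Fs2SupportSketch extends App {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val ss = implicitly[SubscriptionStream[IOS]]
  val merged: IOS[Int] = ss.merge(Vector(Stream(1, 2).covary[IO], Stream(3, 4).covary[IO]))

  // Order depends on the merge interleaving, e.g. List(1, 2, 3, 4)
  println(merged.compile.toList.unsafeRunSync())
}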
Example 63
Source File: commands.scala    From redis4cats   with Apache License 2.0
package dev.profunktor.redis4cats

import algebra._
import cats.effect.{ Concurrent, ContextShift }

trait RedisCommands[F[_], K, V]
    extends StringCommands[F, K, V]
    with HashCommands[F, K, V]
    with SetCommands[F, K, V]
    with SortedSetCommands[F, K, V]
    with ListCommands[F, K, V]
    with GeoCommands[F, K, V]
    with ConnectionCommands[F]
    with ServerCommands[F, K]
    with TransactionalCommands[F, K]
    with PipelineCommands[F]
    with ScriptCommands[F, K, V]
    with KeyCommands[F, K]

object RedisCommands {
  implicit class LiftKOps[F[_], K, V](val cmd: RedisCommands[F, K, V]) extends AnyVal {
    def liftK[G[_]: Concurrent: ContextShift]: RedisCommands[G, K, V] =
      cmd.asInstanceOf[BaseRedis[F, K, V]].liftK[G]
  }
} 
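A hedged sketch of the liftK syntax above (the Eff alias is an illustrative assumption): given the cats-effect 2.x instances for EitherT, every command runs in the lifted effect.

import cats.data.EitherT
import cats.effect.{Concurrent, ContextShift, IO}
import dev.profunktor.redis4cats.RedisCommands

object LiftKSketch {
  type Eff[A] = EitherT[IO, Throwable, A]

  // Lift an IO-based command set into Eff; the instances are taken as
  // parameters so the sketch compiles regardless of implicit derivation.
  def lifted(cmd: RedisCommands[IO, String, String])(
      implicit C: Concurrent[Eff],
      CS: ContextShift[Eff]
  ): RedisCommands[Eff, String, String] =
    cmd.liftK[Eff]
}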
Example 64
Source File: JRFutureSpec.scala    From redis4cats   with Apache License 2.0
package dev.profunktor.redis4cats.effect

import java.util.concurrent.CompletableFuture

import cats.effect.{ Blocker, ContextShift, IO }
import scala.concurrent.ExecutionContext
import munit.FunSuite

class JRFutureSpec extends FunSuite {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val currentThread: IO[String] = IO(Thread.currentThread().getName)

  test("it shifts back once the Future is converted") {
    val ioa =
      Blocker[IO].use { blocker =>
        JRFuture.fromCompletableFuture[IO, String] {
          IO {
            val jFuture = new CompletableFuture[String]()
            jFuture.complete("foo")
            jFuture
          }
        }(blocker)
      }

    (ioa *> currentThread)
      .flatMap(t => IO(assert(t.contains("scala-execution-context-global"))))
      .unsafeToFuture()
  }

  test("it shifts back even when the CompletableFuture fails") {
    val ioa =
      Blocker[IO].use { blocker =>
        JRFuture.fromCompletableFuture[IO, String] {
          IO {
            val jFuture = new CompletableFuture[String]()
            jFuture.completeExceptionally(new RuntimeException("Purposely fail"))
            jFuture
          }
        }(blocker)
      }

    (ioa.attempt *> currentThread)
      .flatMap(t => IO(assert(t.contains("scala-execution-context-global"))))
      .unsafeToFuture()
  }

} 
Example 65
Source File: DummyCpgProvider.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.cpg

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.jdk.CollectionConverters._
import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.codepropertygraph.generated.nodes.NewMethod
import io.shiftleft.cpgserver.query.{CpgOperationFailure, CpgOperationResult, CpgOperationSuccess}
import io.shiftleft.passes.{CpgPass, DiffGraph}
import io.shiftleft.semanticcpg.language._


class DummyCpgProvider(implicit cs: ContextShift[IO]) extends CpgProvider {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val cpgMap: Map[UUID, CpgOperationResult[Cpg]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[Cpg]].asScala

  private val uuidProvider = IO(UUID.randomUUID)

  private class MyPass(cpg: Cpg) extends CpgPass(cpg) {
    override def run(): Iterator[DiffGraph] = {
      implicit val diffGraph: DiffGraph.Builder = DiffGraph.newBuilder
      NewMethod(name = "main", isExternal = false).start.store
      Iterator(diffGraph.build())
    }
  }

  override def createCpg(filenames: Set[String]): IO[UUID] = {
    val cpg = new Cpg

    for {
      cpgId <- uuidProvider
      _ <- blocker
        .blockOn(IO(new MyPass(cpg).createAndApply()))
        .runAsync {
          case Right(_) => IO(cpgMap.put(cpgId, CpgOperationSuccess(cpg))).map(_ => ())
          case Left(ex) => IO(cpgMap.put(cpgId, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield cpgId
  }

  override def retrieveCpg(uuid: UUID): OptionT[IO, CpgOperationResult[Cpg]] = {
    OptionT.fromOption(cpgMap.get(uuid))
  }
} 
Example 66
Source File: ServerAmmoniteExecutor.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.query

import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.console.scripting.AmmoniteExecutor

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

abstract class ServerAmmoniteExecutor(implicit cs: ContextShift[IO]) extends AmmoniteExecutor {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val queryResultMap: Map[UUID, CpgOperationResult[String]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[String]].asScala

  private val uuidProvider = IO { UUID.randomUUID }

  def executeQuery(cpg: Cpg, query: String): IO[UUID] = {
    for {
      resultUuid <- uuidProvider
      _ <- blocker
        .blockOn(runQuery(query, cpg))
        .runAsync {
          case Right(result) => IO(queryResultMap.put(resultUuid, CpgOperationSuccess(result.toString))).map(_ => ())
          case Left(ex)      => IO(queryResultMap.put(resultUuid, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield resultUuid
  }

  def retrieveQueryResult(queryId: UUID): OptionT[IO, CpgOperationResult[String]] = {
    OptionT.fromOption(queryResultMap.get(queryId))
  }

  def executeQuerySync(cpg: Cpg, query: String): IO[CpgOperationResult[String]] = {
    for {
      result <- runQuery(query, cpg)
        .map(v => CpgOperationSuccess(v.toString))
        .handleErrorWith(err => IO(CpgOperationFailure(err)))
    } yield result
  }
} 
Example 67
Source File: SwaggerRoute.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.route

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.circe.generic.auto._
import io.circe.syntax._
import org.http4s._
import org.http4s.circe._
import org.http4s.dsl.io._
import org.http4s.headers.Location
import org.webjars.WebJarAssetLocator
import io.shiftleft.cpgserver.route.CpgRoute.ApiError

final class SwaggerRoute {

  private val blockingEc = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor)
  private val blocker = Blocker.liftExecutionContext(blockingEc)
  private implicit val blockingCs: ContextShift[IO] = IO.contextShift(blockingEc)

  private val swaggerUiVersion = IO { new WebJarAssetLocator().getWebJars.get("swagger-ui") }
  private val swaggerUiResources = swaggerUiVersion.map { ver =>
    s"/META-INF/resources/webjars/swagger-ui/$ver"
  }
  private val swaggerUiPath = Path("swagger-ui")

  val routes: HttpRoutes[IO] = HttpRoutes.of {
    case GET -> Root / ("swagger-ui" | "docs") =>
      PermanentRedirect(Location(Uri.unsafeFromString("swagger-ui/index.html")))

    // TODO discuss with jacob: according to scalac this is unreachable... commenting for now since it probably never worked anyway
    case req @ GET -> (Root | `swaggerUiPath`) / "swagger.yaml" =>
      StaticFile
        .fromResource("/swagger.yaml", blocker, Some(req))
        .getOrElseF(InternalServerError(ApiError("Swagger documentation is missing.").asJson))

    case req @ GET -> path if path.startsWith(swaggerUiPath) => {
      val file = path.toList.tail.mkString("/", "/", "") match {
        case f if f == "/index.html" =>
          StaticFile.fromResource[IO]("/swagger-ui/index.html", blocker, Some(req))
        case f =>
          OptionT.liftF(swaggerUiResources).flatMap { resources =>
            StaticFile.fromResource[IO](resources + f, blocker, Some(req))
          }
      }
      file.getOrElseF(InternalServerError(ApiError(s"Requested file [$file] is missing.").asJson))
    }
  }
}

object SwaggerRoute {
  def apply(): SwaggerRoute =
    new SwaggerRoute
} 
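A hedged sketch of serving the route above (port and mount point are illustrative assumptions), mirroring the BlazeServerBuilder wiring used elsewhere on this page:

import cats.effect.{ContextShift, IO, Timer}
import io.shiftleft.cpgserver.route.SwaggerRoute
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.syntax.kleisli._

import scala.concurrent.ExecutionContext

object SwaggerServerSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  // Mount the swagger routes at the root and expose the server as a Resource.
  val serverResource =
    BlazeServerBuilder[IO](ExecutionContext.global)
      .bindHttp(8080, "localhost")
      .withHttpApp(Router("/" -> SwaggerRoute().routes).orNotFound)
      .resource
}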
Example 68
Source File: DummyCpgProviderSpec.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.cpg

import java.util.UUID

import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{ContextShift, IO}
import org.scalatest.concurrent.Eventually

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.cpgserver.BaseSpec
import io.shiftleft.cpgserver.query.CpgOperationResult

import scala.concurrent.duration._
import scala.language.postfixOps

class DummyCpgProviderSpec extends BaseSpec with Eventually {

  private implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  private def withNewCpgProvider[T](f: DummyCpgProvider => T): T = {
    f(new DummyCpgProvider)
  }

  "Creating a CPG" should {
    "return a UUID referencing the eventual CPG" in withNewCpgProvider { cpgProvider =>
      noException should be thrownBy cpgProvider.createCpg(Set.empty).unsafeRunSync()
    }
  }

  "Retrieving a CPG" should {
    "return a success if the CPG was created successfully" in withNewCpgProvider { cpgProvider =>
      val cpgId = cpgProvider.createCpg(Set.empty).unsafeRunSync()

      eventually(timeout(10 seconds), interval(1 seconds)) {
        cpgProvider.retrieveCpg(cpgId).value.unsafeRunSync() shouldBe defined
      }
    }

    "return an empty OptionT if the CPG does not exist" in withNewCpgProvider { cpgProvider =>
      cpgProvider.retrieveCpg(UUID.randomUUID) shouldBe OptionT.none[IO, CpgOperationResult[Cpg]]
    }
  }
} 
Example 69
Source File: ServerAmmoniteExecutorSpec.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.query

import cats.effect.{ContextShift, IO}
import org.scalatest.{Matchers, WordSpec}

import io.shiftleft.codepropertygraph.Cpg

class ServerAmmoniteExecutorSpec extends WordSpec with Matchers {

  private implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global)

  private class DummyServerAmmoniteExecutor extends ServerAmmoniteExecutor {
    override protected def predef: String = "import io.shiftleft.semanticcpg.language._"
  }

  private def withServerExecutor[T](f: ServerAmmoniteExecutor => T): T = {
    f(new DummyServerAmmoniteExecutor)
  }

  "A ServerAmmoniteExecutor" should {
    "run a query synchronously" in withServerExecutor { executor =>
      executor.executeQuerySync(Cpg.emptyCpg, "cpg.method.l").unsafeRunSync() should matchPattern {
        case CpgOperationSuccess("List()") =>
      }
    }
  }
} 
Example 70
Source File: RabbitSuite.scala    From fs2-rabbit   with Apache License 2.0
package dev.profunktor.fs2rabbit.interpreter

import cats.effect.{ContextShift, IO}
import cats.implicits._
import dev.profunktor.fs2rabbit.BaseSpec
import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig

import scala.concurrent.ExecutionContext

class RabbitSuite extends BaseSpec with Fs2RabbitSpec {

  override implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  override val config: Fs2RabbitConfig =
    Fs2RabbitConfig(
      host = "localhost",
      port = 5672,
      virtualHost = "/",
      connectionTimeout = 30,
      ssl = false,
      username = "guest".some,
      password = "guest".some,
      requeueOnNack = false,
      requeueOnReject = false,
      internalQueueSize = 500.some
    )

} 
Example 71
Source File: Publish.scala    From fs2-rabbit   with Apache License 2.0
package dev.profunktor.fs2rabbit.algebra

import cats.effect.syntax.effect._
import cats.effect.{Blocker, ContextShift, Effect, Sync}
import cats.syntax.functor._
import com.rabbitmq.client.{AMQP, ReturnListener}
import dev.profunktor.fs2rabbit.model._

object Publish {
  def make[F[_]: Effect: ContextShift](
      blocker: Blocker
  ): Publish[F] =
    new Publish[F] {
      override def basicPublish(channel: AMQPChannel,
                                exchangeName: ExchangeName,
                                routingKey: RoutingKey,
                                msg: AmqpMessage[Array[Byte]]): F[Unit] =
        blocker.delay {
          channel.value.basicPublish(
            exchangeName.value,
            routingKey.value,
            msg.properties.asBasicProps,
            msg.payload
          )
        }

      override def basicPublishWithFlag(channel: AMQPChannel,
                                        exchangeName: ExchangeName,
                                        routingKey: RoutingKey,
                                        flag: PublishingFlag,
                                        msg: AmqpMessage[Array[Byte]]): F[Unit] =
        blocker.delay {
          channel.value.basicPublish(
            exchangeName.value,
            routingKey.value,
            flag.mandatory,
            msg.properties.asBasicProps,
            msg.payload
          )
        }

      override def addPublishingListener(
          channel: AMQPChannel,
          listener: PublishReturn => F[Unit]
      ): F[Unit] =
        Sync[F].delay {
          val returnListener = new ReturnListener {
            override def handleReturn(replyCode: Int,
                                      replyText: String,
                                      exchange: String,
                                      routingKey: String,
                                      properties: AMQP.BasicProperties,
                                      body: Array[Byte]): Unit = {
              val publishReturn =
                PublishReturn(
                  ReplyCode(replyCode),
                  ReplyText(replyText),
                  ExchangeName(exchange),
                  RoutingKey(routingKey),
                  AmqpProperties.unsafeFrom(properties),
                  AmqpBody(body)
                )

              listener(publishReturn).toIO.unsafeRunAsync(_ => ())
            }
          }

          channel.value.addReturnListener(returnListener)
        }.void

      override def clearPublishingListeners(channel: AMQPChannel): F[Unit] =
        Sync[F].delay {
          channel.value.clearReturnListeners()
        }.void
    }
}

trait Publish[F[_]] {
  def basicPublish(channel: AMQPChannel,
                   exchangeName: ExchangeName,
                   routingKey: RoutingKey,
                   msg: AmqpMessage[Array[Byte]]): F[Unit]
  def basicPublishWithFlag(channel: AMQPChannel,
                           exchangeName: ExchangeName,
                           routingKey: RoutingKey,
                           flag: PublishingFlag,
                           msg: AmqpMessage[Array[Byte]]): F[Unit]
  def addPublishingListener(channel: AMQPChannel, listener: PublishReturn => F[Unit]): F[Unit]
  def clearPublishingListeners(channel: AMQPChannel): F[Unit]
} 
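A hedged usage sketch of the algebra above (exchange and routing key are illustrative; a real AMQPChannel comes from an fs2-rabbit connection established elsewhere):

import cats.effect.{Blocker, ContextShift, IO}
import dev.profunktor.fs2rabbit.algebra.Publish
import dev.profunktor.fs2rabbit.model._

import scala.concurrent.ExecutionContext

object PublishSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // Publish one message on the given channel via the blocking pool.
  def publishHello(channel: AMQPChannel, blocker: Blocker): IO[Unit] = {
    val publisher = Publish.make[IO](blocker)
    val message = AmqpMessage("hello".getBytes("UTF-8"), AmqpProperties.empty)
    publisher.basicPublish(channel, ExchangeName("demo"), RoutingKey("demo.key"), message)
  }
}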
Example 72
Source File: TextExtract.scala    From docspell   with GNU General Public License v3.0
package docspell.extract.ocr

import cats.effect.{Blocker, ContextShift, Sync}
import fs2.Stream

import docspell.common._
import docspell.extract.internal.Text
import docspell.files._

object TextExtract {

  def extract[F[_]: Sync: ContextShift](
      in: Stream[F, Byte],
      blocker: Blocker,
      logger: Logger[F],
      lang: String,
      config: OcrConfig
  ): Stream[F, Text] =
    extractOCR(in, blocker, logger, lang, config)

  def extractOCR[F[_]: Sync: ContextShift](
      in: Stream[F, Byte],
      blocker: Blocker,
      logger: Logger[F],
      lang: String,
      config: OcrConfig
  ): Stream[F, Text] =
    Stream
      .eval(TikaMimetype.detect(in, MimeTypeHint.none))
      .flatMap({
        case MimeType.pdf =>
          Stream.eval(Ocr.extractPdf(in, blocker, logger, lang, config)).unNoneTerminate

        case mt if mt.primary == "image" =>
          Ocr.extractImage(in, blocker, logger, lang, config)

        case mt =>
          raiseError(s"File `$mt` not supported")
      })
      .map(Text.apply)

  private def raiseError[F[_]: Sync](msg: String): Stream[F, Nothing] =
    Stream.raiseError[F](new Exception(msg))
} 
Example 73
Source File: Commands.scala    From franklin   with Apache License 2.0
package com.azavea.franklin.api.commands

import cats.effect.{ContextShift, ExitCode, IO}
import cats.implicits._
import com.azavea.franklin.crawler.StacImport
import com.monovore.decline._
import doobie.Transactor
import doobie.free.connection.{rollback, setAutoCommit, unit}
import doobie.util.transactor.Strategy
import org.flywaydb.core.Flyway

object Commands {

  final case class RunMigrations(databaseConfig: DatabaseConfig)

  final case class RunServer(apiConfig: ApiConfig, dbConfig: DatabaseConfig)

  final case class RunImport(
      catalogRoot: String,
      config: DatabaseConfig,
      dryRun: Boolean
  )

  private def runImportOpts(implicit cs: ContextShift[IO]): Opts[RunImport] =
    Opts.subcommand("import", "Import a STAC catalog") {
      (
        Options.catalogRoot,
        Options.databaseConfig,
        Options.dryRun
      ).mapN(RunImport)
    }

  private def runMigrationsOpts(implicit cs: ContextShift[IO]): Opts[RunMigrations] =
    Opts.subcommand("migrate", "Runs migrations against database") {
      Options.databaseConfig map RunMigrations
    }

  private def runServerOpts(implicit cs: ContextShift[IO]): Opts[RunServer] =
    Opts.subcommand("serve", "Runs web service") {
      (Options.apiConfig, Options.databaseConfig) mapN RunServer
    }

  def runMigrations(dbConfig: DatabaseConfig): IO[ExitCode] = IO {
    Flyway
      .configure()
      .dataSource(
        s"${dbConfig.jdbcUrl}",
        dbConfig.dbUser,
        dbConfig.dbPass
      )
      .locations("classpath:migrations/")
      .load()
      .migrate()
    ExitCode.Success
  }

  def runImport(
      stacCatalog: String,
      config: DatabaseConfig,
      dryRun: Boolean
  )(
      implicit contextShift: ContextShift[IO]
  ): IO[Unit] = {
    val xa =
      Transactor.strategy.set(
        Transactor.fromDriverManager[IO](
          config.driver,
          config.jdbcUrl,
          config.dbUser,
          config.dbPass
        ),
        if (dryRun) {
          Strategy.default.copy(before = setAutoCommit(false), after = rollback, always = unit)
        } else { Strategy.default }
      )

    new StacImport(stacCatalog).runIO(xa)
  }

  def applicationCommand(implicit cs: ContextShift[IO]): Command[Product] =
    Command("", "Your Friendly Neighborhood OGC API - Features and STAC Web Service") {
      runServerOpts orElse runMigrationsOpts orElse runImportOpts
    }

} 
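A hedged sketch of wiring the decline command above into an entry point (the IOApp skeleton and the fallback branch are illustrative assumptions; IOApp supplies the implicit ContextShift[IO] that applicationCommand needs):

import cats.effect.{ExitCode, IO, IOApp}
import cats.implicits._
import com.azavea.franklin.api.commands.Commands

object MainSketch extends IOApp {
  def run(args: List[String]): IO[ExitCode] =
    Commands.applicationCommand.parse(args) match {
      case Left(help)                        => IO(System.err.println(help)).as(ExitCode.Error)
      case Right(Commands.RunMigrations(db)) => Commands.runMigrations(db)
      case Right(other)                      => IO(println(s"unhandled subcommand: $other")).as(ExitCode.Success)
    }
}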
Example 74
Source File: CogAssetNodeImplicits.scala    From franklin   with Apache License 2.0
package com.azavea.franklin.tile

import cats.effect.ContextShift
import cats.effect.IO
import geotrellis.proj4.WebMercator
import geotrellis.raster._
import geotrellis.raster.io.geotiff.AutoHigherResolution
import geotrellis.raster.resample.NearestNeighbor
import geotrellis.server.ExtentReification
import geotrellis.server.TmsReification
import geotrellis.vector.Extent

object CogAssetNodeImplicits extends TileUtil {

  implicit def cogAssetNodeTmsReification: TmsReification[CogAssetNode] =
    new TmsReification[CogAssetNode] {

      def tmsReification(
          self: CogAssetNode,
          buffer: Int
      )(implicit cs: ContextShift[IO]): (Int, Int, Int) => IO[ProjectedRaster[MultibandTile]] =
        (z: Int, x: Int, y: Int) => {
          def fetch(xCoord: Int, yCoord: Int): IO[Raster[MultibandTile]] = {
            self.fetchTile(z, xCoord, yCoord, WebMercator).flatMap(a => IO(a))
          }

          fetch(x, y).map { tile =>
            val extent = tmsLevels(z).mapTransform.keyToExtent(x, y)
            ProjectedRaster(tile.tile, extent, WebMercator)
          }
        }
    }

  implicit def cogAssetNodeExtentReification: ExtentReification[CogAssetNode] =
    new ExtentReification[CogAssetNode] {

      def extentReification(
          self: CogAssetNode
      )(implicit cs: ContextShift[IO]): (Extent, CellSize) => IO[ProjectedRaster[MultibandTile]] =
        (extent: Extent, cs: CellSize) => {
          self.getRasterSource map { rs =>
            rs.resample(
                TargetRegion(new GridExtent[Long](extent, cs)),
                NearestNeighbor,
                AutoHigherResolution
              )
              .read(extent)
              .map { ProjectedRaster(_, WebMercator) } match {
              case Some(mbt) => mbt
              case _ =>
                throw new Exception(
                  s"No tile available for RasterExtent: ${RasterExtent(extent, cs)}"
                )
            }
          }
        }
    }
} 
Example 75
Source File: TestServices.scala    From franklin   with Apache License 2.0
package com.azavea.franklin.api

import cats.effect.{ContextShift, Sync}
import com.azavea.franklin.api.commands.ApiConfig
import com.azavea.franklin.api.services.{CollectionItemsService, CollectionsService, SearchService}
import doobie.Transactor
import eu.timepit.refined.types.numeric.{NonNegInt, PosInt}

class TestServices[F[_]: Sync](xa: Transactor[F])(implicit cs: ContextShift[F]) {

  val apiConfig: ApiConfig =
    ApiConfig(PosInt(9090), PosInt(9090), "localhost", "http", NonNegInt(30), true, false)

  val searchService: SearchService[F] =
    new SearchService[F](apiConfig.apiHost, NonNegInt(30), apiConfig.enableTiles, xa)

  val collectionsService: CollectionsService[F] = new CollectionsService[F](
    xa,
    apiConfig
  )

  val collectionItemsService: CollectionItemsService[F] = new CollectionItemsService[F](
    xa,
    apiConfig
  )

} 
Example 76
Source File: TVarTest.scala    From cats-stm   with Apache License 2.0
package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AsyncFunSuite 

import scala.concurrent.ExecutionContext

class TVarTest extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global

  implicit val timer: Timer[IO] = IO.timer(executionContext)

  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  test("Get returns current value") {
    val prog: STM[String] = for {
      tvar  <- TVar.of("hello")
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "hello"
    }
  }

  test("Set changes current value") {
    val prog: STM[String] = for {
      tvar  <- TVar.of("hello")
      _     <- tvar.set("world")
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "world"
      value shouldBe "world"
    }
  }

  test("Modify changes current value") {
    val prog: STM[String] = for {
      tvar  <- TVar.of("hello")
      _     <- tvar.modify(_.toUpperCase)
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "HELLO"
    }
  }

  test("Pending transaction is removed on success") {
    val tvar = TVar.of("foo").commit[IO].unsafeRunSync

    val prog: STM[String] = for {
      _     <- tvar.modify(_.toUpperCase)
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "FOO"

      tvar.value shouldBe "FOO"
      tvar.pending.get.isEmpty shouldBe true
    }
  }

  test("Pending transaction is removed on failure") {
    val tvar = TVar.of("foo").commit[IO].unsafeRunSync

    val prog: STM[String] = for {
      _     <- tvar.modify(_.toUpperCase)
      _     <- STM.abort[String](new RuntimeException("boom"))
      value <- tvar.get
    } yield value

    for (_ <- prog.commit[IO].attempt.unsafeToFuture) yield {
      tvar.value shouldBe "foo"

      tvar.pending.get.isEmpty shouldBe true
    }
  }
} 
Example 77
Source File: PropertyTests.scala    From cats-stm   with Apache License 2.0
package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import cats.instances.list._
import cats.syntax.functor._
import cats.syntax.traverse._
import org.scalacheck._
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks

import scala.concurrent.ExecutionContext
import scala.util.Random


class MaintainsInvariants extends AnyFunSuite with ScalaCheckDrivenPropertyChecks with Matchers {
  implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global

  implicit val timer: Timer[IO] = IO.timer(executionContext)

  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  val tvarGen: Gen[TVar[Long]] = for {
    value <- Gen.posNum[Long]
  } yield TVar.of(value).commit[IO].unsafeRunSync

  val txnGen: List[TVar[Long]] => Gen[STM[Unit]] = tvars =>
    for {
      fromIdx <- Gen.choose(0, tvars.length - 1)
      toIdx   <- Gen.choose(0, tvars.length - 1) suchThat (_ != fromIdx)
      txn <- for {
        balance <- tvars(fromIdx).get
        transfer = Math.abs(Random.nextLong()) % balance
        _ <- tvars(fromIdx).modify(_ - transfer)
        _ <- tvars(toIdx).modify(_ + transfer)
      } yield ()
    } yield txn

  val gen: Gen[(Long, List[TVar[Long]], IO[Unit])] = for {
    tvars <- Gen.listOfN(50, tvarGen)
    total = tvars.foldLeft(0L)((acc, tvar) => acc + tvar.value)
    txns <- Gen.listOf(txnGen(tvars))
    commit = txns.traverse(_.commit[IO].start)
    run    = commit.flatMap(l => l.traverse(_.join)).void
  } yield (total, tvars, run)

  test("Transactions maintain invariants") {
    forAll(gen) { g =>
      val total = g._1
      val tvars = g._2
      val txn   = g._3

      txn.unsafeRunSync()

      tvars.map(_.value).sum shouldBe total
    }
  }

} 
Example 78
Source File: TQueueTest.scala    From cats-stm   with Apache License 2.0
package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import cats.instances.string._
import cats.syntax.semigroup._
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AsyncFunSuite 

import scala.concurrent.ExecutionContext

class TQueueTest extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global

  implicit val timer: Timer[IO] = IO.timer(executionContext)

  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  test("Read removes the first element") {
    val prog: STM[(String, Boolean)] = for {
      tqueue <- TQueue.empty[String]
      _      <- tqueue.put("hello")
      value  <- tqueue.read
      empty  <- tqueue.isEmpty
    } yield value -> empty

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value._1 shouldBe "hello"
      value._2 shouldBe true
    }
  }

  test("Peek does not remove the first element") {
    val prog: STM[(String, Boolean)] = for {
      tqueue <- TQueue.empty[String]
      _      <- tqueue.put("hello")
      value  <- tqueue.peek
      empty  <- tqueue.isEmpty
    } yield value -> empty

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value._1 shouldBe "hello"
      value._2 shouldBe false
    }
  }

  test("TQueue is FIFO") {
    val prog: STM[String] = for {
      tqueue <- TQueue.empty[String]
      _      <- tqueue.put("hello")
      _      <- tqueue.put("world")
      hello  <- tqueue.read
      world  <- tqueue.peek
    } yield hello |+| world

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "helloworld"
    }
  }

} 
Example 79
Source File: TSemaphoreTest.scala    From cats-stm   with Apache License 2.0
package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

class TSemaphoreTest extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global

  implicit val timer: Timer[IO] = IO.timer(executionContext)

  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  test("Acquire decrements the number of permits") {
    val prog: STM[Long] = for {
      tsem  <- TSemaphore.make(1)
      _     <- tsem.acquire
      value <- tsem.available
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe 0
    }
  }

  test("Release increments the number of permits") {
    val prog: STM[Long] = for {
      tsem  <- TSemaphore.make(0)
      _     <- tsem.release
      value <- tsem.available
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe 1
    }
  }

} 
Example 80
Source File: AkkaActorIntermediator.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.sourcing.akka

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import cats.effect.{ContextShift, Effect, IO, Timer}
import cats.syntax.all._
import ch.epfl.bluebrain.nexus.sourcing.akka.Msg._
import retry.CatsEffect._
import retry.syntax.all._
import retry.{RetryDetails, RetryPolicy}

import scala.reflect.ClassTag


abstract private[akka] class AkkaActorIntermediator[F[_]: Timer](
    name: String,
    selection: ActorRefSelection[F],
    askTimeout: Timeout
)(implicit F: Effect[F], as: ActorSystem, policy: RetryPolicy[F]) {

  implicit private[akka] val contextShift: ContextShift[IO]        = IO.contextShift(as.dispatcher)
  implicit private[akka] def noop[A]: (A, RetryDetails) => F[Unit] = retry.noop[F, A]
  implicit private val timeout: Timeout                            = askTimeout

  private[akka] def send[M <: Msg, Reply, A](id: String, msg: M, f: Reply => A)(implicit
      Reply: ClassTag[Reply]
  ): F[A] =
    selection(name, id).flatMap { ref =>
      val future = IO(ref ? msg)
      val fa     = IO.fromFuture(future).to[F]
      fa.flatMap[A] {
          case Reply(value)                     => F.pure(f(value))
          case te: TypeError                    => F.raiseError(te)
          case um: UnexpectedMsgId              => F.raiseError(um)
          case cet: CommandEvaluationTimeout[_] => F.raiseError(cet)
          case cee: CommandEvaluationError[_]   => F.raiseError(cee)
          case other                            => F.raiseError(TypeError(id, Reply.runtimeClass.getSimpleName, other))
        }
        .retryingOnAllErrors[Throwable]
    }
} 
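The heart of the intermediator above is the bridge from Akka's Future-based ask into an arbitrary effect F. That bridge distills to a few lines; a hedged sketch (askF is a hypothetical helper, not part of the source):

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import cats.effect.{ContextShift, Effect, IO}

// Suspend the ask so the Future is not created eagerly, then lift the
// resulting IO into F (Effect[F] provides the required LiftIO instance).
def askF[F[_]](ref: ActorRef, msg: Any)(implicit
    F: Effect[F],
    cs: ContextShift[IO],
    timeout: Timeout
): F[Any] =
  IO.fromFuture(IO(ref ? msg)).to[F]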
Example 81
Source File: InMemoryAggregateSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing

import cats.effect.{ContextShift, IO, Timer}
import ch.epfl.bluebrain.nexus.sourcing.AggregateFixture._
import ch.epfl.bluebrain.nexus.sourcing.Command.{Increment, IncrementAsync, Initialize}
import ch.epfl.bluebrain.nexus.sourcing.Event.{Incremented, Initialized}
import ch.epfl.bluebrain.nexus.sourcing.State.Current

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class InMemoryAggregateSpec extends SourcingSpec {

  implicit val ctx: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]      = IO.timer(ExecutionContext.global)

  "An InMemoryAggregate" should {

    val agg = Aggregate
      .inMemory[IO, Int]("global", initialState, AggregateFixture.next, AggregateFixture.evaluate[IO])
      .unsafeRunSync()

    "return its name" in {
      agg.name shouldEqual "global"
    }

    "update its state when accepting commands" in {
      agg.evaluateE(1, Increment(0, 2)).unsafeRunSync().rightValue shouldEqual Incremented(1, 2)
      agg
        .evaluate(1, IncrementAsync(1, 5, 200.millis))
        .unsafeRunSync()
        .rightValue shouldEqual (Current(2, 7) -> Incremented(2, 5))
      agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "return its current seq nr" in {
      agg.lastSequenceNr(1).unsafeRunSync() shouldEqual 2L
    }

    "test without applying changes" in {
      agg.test(1, Initialize(0)).unsafeRunSync().leftValue
      agg.testE(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Initialized(3)
      agg.testS(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0)
      agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "not update its state if evaluation fails" in {
      agg.evaluate(1, Initialize(0)).unsafeRunSync().leftValue
      agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "evaluate commands one at a time" in {
      agg.evaluateS(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0)
      agg.currentState(1).unsafeRunSync() shouldEqual Current(3, 0)
      agg.evaluateS(1, IncrementAsync(3, 2, 300.millis)).unsafeToFuture()
      agg.evaluateE(1, IncrementAsync(4, 2, 20.millis)).unsafeRunSync().rightValue shouldEqual Incremented(5, 2)
      agg.currentState(1).unsafeRunSync() shouldEqual Current(5, 4)
    }

    "fold over the event stream in order" in {
      agg
        .foldLeft(1, (0, true)) {
          case ((lastRev, succeeded), event) => (event.rev, succeeded && event.rev - lastRev == 1)
        }
        .unsafeRunSync()
        ._2 shouldEqual true
    }

    "return all events" in {
      agg.foldLeft(1, 0) { case (acc, _) => acc + 1 }.unsafeRunSync() shouldEqual 5
    }

    "append events" in {
      agg.append(2, Incremented(1, 2)).unsafeRunSync() shouldEqual 1L
      agg.currentState(1).unsafeRunSync() shouldEqual Current(5, 4)
    }

    "return true for existing ids" in {
      agg.exists(1).unsafeRunSync() shouldEqual true
    }

    "return false for unknown ids" in {
      agg.exists(Int.MaxValue).unsafeRunSync() shouldEqual false
    }

    "return the sequence number for a snapshot" in {
      agg.snapshot(1).unsafeRunSync() shouldEqual 5L
    }

  }

} 
Example 82
Source File: ProjectionsSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing.projections

import akka.actor.ActorSystem
import akka.persistence.query.Offset
import akka.stream.scaladsl.Source
import akka.testkit.{TestKit, TestKitBase}
import cats.effect.{ContextShift, IO}
import ch.epfl.bluebrain.nexus.sourcing.projections.Fixture.memoize
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionsSpec.SomeEvent
import io.circe.generic.auto._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}

import scala.concurrent.duration._

//noinspection TypeAnnotation
@DoNotDiscover
class ProjectionsSpec
    extends TestKitBase
    with AnyWordSpecLike
    with Matchers
    with TestHelpers
    with IOValues
    with Eventually
    with BeforeAndAfterAll {

  implicit override lazy val system: ActorSystem      = SystemBuilder.persistence("ProjectionsSpec")
  implicit private val contextShift: ContextShift[IO] = IO.contextShift(system.dispatcher)

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  "A Projection" should {
    val id            = genString()
    val persistenceId = s"/some/${genString()}"
    val projections   = memoize(Projections[IO, SomeEvent]).unsafeRunSync()
    val progress      = OffsetProgress(Offset.sequence(42), 42, 42, 0)

    "store progress" in {
      projections.ioValue.recordProgress(id, progress).ioValue
    }

    "retrieve stored progress" in {
      projections.ioValue.progress(id).ioValue shouldEqual progress
    }

    "retrieve NoProgress for unknown projections" in {
      projections.ioValue.progress(genString()).ioValue shouldEqual NoProgress
    }

    val firstOffset: Offset  = Offset.sequence(42)
    val secondOffset: Offset = Offset.sequence(98)
    val firstEvent           = SomeEvent(1L, "description")
    val secondEvent          = SomeEvent(2L, "description2")

    "store an event" in {
      projections.ioValue.recordFailure(id, persistenceId, 1L, firstOffset, firstEvent).ioValue
    }

    "store another event" in {
      projections.ioValue.recordFailure(id, persistenceId, 2L, secondOffset, secondEvent).ioValue
    }

    "retrieve stored events" in {
      val expected = Seq((firstEvent, firstOffset), (secondEvent, secondOffset))
      eventually {
        logOf(projections.ioValue.failures(id)) should contain theSameElementsInOrderAs expected
      }
    }

    "retrieve empty list of events for unknown failure log" in {
      eventually {
        logOf(projections.ioValue.failures(genString())) shouldBe empty
      }
    }

  }

  private def logOf(source: Source[(SomeEvent, Offset), _]): Vector[(SomeEvent, Offset)] = {
    val f = source.runFold(Vector.empty[(SomeEvent, Offset)])(_ :+ _)
    IO.fromFuture(IO(f)).ioValue
  }

  implicit override def patienceConfig: PatienceConfig =
    PatienceConfig(30.seconds, 50.milliseconds)
}

object ProjectionsSpec {
  final case class SomeEvent(rev: Long, description: String)
} 
Example 83
Source File: InMemoryStateMachineSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing

import cats.effect.{ContextShift, IO, Timer}
import ch.epfl.bluebrain.nexus.sourcing.Command.{Increment, IncrementAsync, Initialize}
import ch.epfl.bluebrain.nexus.sourcing.State.Current
import ch.epfl.bluebrain.nexus.sourcing.StateMachineFixture._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class InMemoryStateMachineSpec extends SourcingSpec {

  implicit val ctx: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]      = IO.timer(ExecutionContext.global)

  "An InMemoryStateMachine" should {

    val cache = StateMachine.inMemory[IO, Int]("global", initialState, evaluate[IO]).unsafeRunSync()

    "return its name" in {
      cache.name shouldEqual "global"
    }

    "update its state when accepting commands" in {
      cache.evaluate(1, Increment(0, 2)).unsafeRunSync().rightValue shouldEqual Current(1, 2)
      cache.evaluate(1, IncrementAsync(1, 5, 200.millis)).unsafeRunSync().rightValue shouldEqual Current(2, 7)
      cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "test without applying changes" in {
      cache.test(1, Initialize(0)).unsafeRunSync().leftValue
      cache.test(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0)
      cache.test(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0)
      cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "not update its state if evaluation fails" in {
      cache.evaluate(1, Initialize(0)).unsafeRunSync().leftValue
      cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7)
    }

    "evaluate commands one at a time" in {
      cache.evaluate(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0)
      cache.currentState(1).unsafeRunSync() shouldEqual Current(3, 0)
      cache.evaluate(1, IncrementAsync(3, 2, 300.millis)).unsafeToFuture()
      cache.evaluate(1, IncrementAsync(4, 2, 20.millis)).unsafeRunSync().rightValue shouldEqual Current(5, 4)
      cache.currentState(1).unsafeRunSync() shouldEqual Current(5, 4)
    }

  }

} 
Example 84
Source File: Main.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli

import cats.Parallel
import cats.effect.{ContextShift, ExitCode, Timer}
import cats.syntax.all._
import monix.catnap.SchedulerEffect
import monix.eval.{Task, TaskApp}

// $COVERAGE-OFF$
object Main extends TaskApp {

  override def run(args: List[String]): Task[ExitCode] = {
    implicit val cs: ContextShift[Task] = SchedulerEffect.contextShift[Task](scheduler)
    implicit val tm: Timer[Task]        = SchedulerEffect.timer[Task](scheduler)
    implicit val pl: Parallel[Task]     = Task.catsParallel
    Cli(args, sys.env).recoverWith {
      case err: CliError => Task.delay(println(err.show)).as(ExitCode.Error)
    }
  }

} 
Example 85
Source File: Influx.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.modules.influx

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.AbstractCommand
import com.monovore.decline.Opts
import distage.TagK
import izumi.distage.model.recursive.LocatorRef


final class Influx[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit
    F: ConcurrentEffect[F]
) extends AbstractCommand[F](locatorOpt) {

  def subcommand: Opts[F[ExitCode]] =
    Opts.subcommand("influxdb", "influxDB projection.") {
      run
    }

  def run: Opts[F[ExitCode]] =
    Opts.subcommand("run", "Runs the influxDB projection") {
      locatorResource.map { _.use { locator => locator.get[InfluxProjection[F]].run.as(ExitCode.Success) } }
    }

}

object Influx {

  final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift](
      locatorOpt: Option[LocatorRef] = None
  ): Influx[F] =
    new Influx[F](locatorOpt)

} 
Example 86
Source File: Postgres.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.modules.postgres

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.AbstractCommand
import com.monovore.decline.Opts
import distage.TagK
import izumi.distage.model.recursive.LocatorRef


final class Postgres[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit
    F: ConcurrentEffect[F]
) extends AbstractCommand[F](locatorOpt) {

  def subcommand: Opts[F[ExitCode]] =
    Opts.subcommand("postgres", "Postgres database projection.") {
      run
    }

  def run: Opts[F[ExitCode]] =
    Opts.subcommand("run", "Runs the postgres database projection") {
      locatorResource.map { _.use { locator => locator.get[PostgresProjection[F]].run.as(ExitCode.Success) } }
    }

}

object Postgres {

  final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift](
      locatorOpt: Option[LocatorRef] = None
  ): Postgres[F] =
    new Postgres[F](locatorOpt)

} 
Example 87
Source File: PostgresModule.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.modules.postgres

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import distage.{ModuleDef, TagK}
import doobie.util.transactor.Transactor
import izumi.distage.model.definition.StandardAxis.Repo
import izumi.distage.model.recursive.LocatorRef

final class PostgresModule[F[_]: Parallel: ContextShift: ConcurrentEffect: Timer: TagK] extends ModuleDef {
  make[Postgres[F]].tagged(Repo.Prod).from { locatorRef: LocatorRef => Postgres[F](Some(locatorRef)) }
  make[Transactor[F]].tagged(Repo.Prod).from { (cfg: AppConfig) =>
    Transactor.fromDriverManager[F](
      "org.postgresql.Driver",
      cfg.postgres.jdbcUrl,
      cfg.postgres.username,
      cfg.postgres.password
    )
  }
  make[PostgresProjection[F]].tagged(Repo.Prod)
}

object PostgresModule {
  final def apply[F[_]: Parallel: ContextShift: ConcurrentEffect: Timer: TagK]: PostgresModule[F] =
    new PostgresModule[F]
} 
Example 88
Source File: Config.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.modules.config

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import ch.epfl.bluebrain.nexus.cli.{AbstractCommand, Console}
import com.monovore.decline.Opts
import com.typesafe.config.ConfigRenderOptions
import distage.TagK
import izumi.distage.model.recursive.LocatorRef
import pureconfig.ConfigWriter


final class Config[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit
    F: ConcurrentEffect[F]
) extends AbstractCommand[F](locatorOpt) {

  def subcommand: Opts[F[ExitCode]] =
    Opts.subcommand("config", "Read or write the tool configuration.") {
      show
    }

  private def show: Opts[F[ExitCode]] =
    Opts.subcommand("show", "Print the current configuration") {
      locatorResource.map {
        _.use { locator =>
          val console = locator.get[Console[F]]
          val cfg     = locator.get[AppConfig]
          console.println(renderConfig(cfg)).as(ExitCode.Success)
        }
      }
    }

  private def renderConfig(cfg: AppConfig): String = {
    val opts = ConfigRenderOptions.concise().setComments(false).setJson(false).setFormatted(true)
    ConfigWriter[AppConfig].to(cfg).render(opts)
  }
}

object Config {
  final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift](
      locatorOpt: Option[LocatorRef] = None
  ): Config[F] =
    new Config[F](locatorOpt)
} 
Example 89
Source File: AbstractCommand.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, Resource, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.CliOpts._
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import ch.epfl.bluebrain.nexus.cli.modules.config.ConfigModule
import ch.epfl.bluebrain.nexus.cli.modules.influx.InfluxModule
import ch.epfl.bluebrain.nexus.cli.modules.postgres.PostgresModule
import com.monovore.decline.Opts
import distage.{Injector, TagK}
import izumi.distage.model.Locator
import izumi.distage.model.definition.StandardAxis.Repo
import izumi.distage.model.definition.{Activation, Module, ModuleDef}
import izumi.distage.model.plan.Roots
import izumi.distage.model.recursive.LocatorRef

abstract class AbstractCommand[F[_]: TagK: Timer: ContextShift: Parallel](locatorOpt: Option[LocatorRef])(implicit
    F: ConcurrentEffect[F]
) {

  protected def locatorResource: Opts[Resource[F, Locator]] =
    locatorOpt match {
      case Some(value) => Opts(Resource.make(F.delay(value.get))(_ => F.unit))
      case None        =>
        (envConfig.orNone, postgresConfig.orNone, influxConfig.orNone, token.orNone).mapN {
          case (e, p, i, t) =>
            val res: Resource[F, Module] = Resource.make({
              AppConfig.load[F](e, p, i, t).flatMap[Module] {
                case Left(err)    => F.raiseError(err)
                case Right(value) =>
                  val effects  = EffectModule[F]
                  val cli      = CliModule[F]
                  val config   = ConfigModule[F]
                  val postgres = PostgresModule[F]
                  val influx   = InfluxModule[F]
                  val modules  = effects ++ cli ++ config ++ postgres ++ influx ++ new ModuleDef {
                    make[AppConfig].from(value)
                  }
                  F.pure(modules)
              }
            })(_ => F.unit)

            res.flatMap { modules =>
              Injector(Activation(Repo -> Repo.Prod)).produceF[F](modules, Roots.Everything).toCats
            }
        }
    }
} 
Example 90
Source File: InfluxDocker.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.influx

import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import distage.TagK
import izumi.distage.docker.Docker.{ContainerConfig, DockerPort}
import izumi.distage.docker.modules.DockerSupportModule
import izumi.distage.docker.{ContainerDef, Docker}
import izumi.distage.model.definition.ModuleDef
import org.http4s.Uri

object InfluxDocker extends ContainerDef {

  val primaryPort: DockerPort = DockerPort.TCP(8086)

  override def config: InfluxDocker.Config =
    ContainerConfig(
      image = "library/influxdb:1.8.0",
      ports = Seq(primaryPort),
      env = Map("INFLUXDB_REPORTING_DISABLED" -> "true", "INFLUXDB_HTTP_FLUX_ENABLED" -> "true"),
      reuse = true
    )

  class Module[F[_]: ConcurrentEffect: ContextShift: Timer: TagK] extends ModuleDef {
    make[InfluxDocker.Container].fromResource {
      InfluxDocker.make[F]
    }

    make[InfluxHostConfig].from { docker: InfluxDocker.Container =>
      val knownAddress = docker.availablePorts.availablePorts(primaryPort).head
      InfluxHostConfig(knownAddress.hostV4, knownAddress.port)
    }

    // add docker dependencies and override default configuration
    include(new DockerSupportModule[F] overridenBy new ModuleDef {
      make[Docker.ClientConfig].from {
        Docker.ClientConfig(
          readTimeoutMs = 60000, // long timeout for gh actions
          connectTimeoutMs = 500,
          allowReuse = false,
          useRemote = false,
          useRegistry = true,
          remote = None,
          registry = None
        )
      }
    })
  }

  final case class InfluxHostConfig(host: String, port: Int) {
    def endpoint: Uri = Uri.unsafeFromString(s"http://$host:$port")
  }
} 
Example 91
Source File: PostgresDocker.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.postgres

import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import distage.TagK
import izumi.distage.docker.Docker.{ContainerConfig, DockerPort}
import izumi.distage.docker.modules.DockerSupportModule
import izumi.distage.docker.{ContainerDef, Docker}
import izumi.distage.model.definition.ModuleDef

object PostgresDocker extends ContainerDef {

  val primaryPort: DockerPort = DockerPort.TCP(5432)
  val password: String        = "postgres"

  override def config: PostgresDocker.Config =
    ContainerConfig(
      image = "library/postgres:12.2",
      ports = Seq(primaryPort),
      env = Map("POSTGRES_PASSWORD" -> password),
      reuse = true
    )

  class Module[F[_]: ConcurrentEffect: ContextShift: Timer: TagK] extends ModuleDef {
    make[PostgresDocker.Container].fromResource {
      PostgresDocker.make[F]
    }

    make[PostgresHostConfig].from { docker: PostgresDocker.Container =>
      val knownAddress = docker.availablePorts.availablePorts(primaryPort).head
      PostgresHostConfig(knownAddress.hostV4, knownAddress.port)
    }

    // add docker dependencies and override default configuration
    include(new DockerSupportModule[F] overridenBy new ModuleDef {
      make[Docker.ClientConfig].from {
        Docker.ClientConfig(
          readTimeoutMs = 60000, // long timeout for gh actions
          connectTimeoutMs = 500,
          allowReuse = false,
          useRemote = false,
          useRegistry = true,
          remote = None,
          registry = None
        )
      }
    })
  }

  final case class PostgresHostConfig(host: String, port: Int)
} 
Example 92
Source File: OffsetSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.sse

import java.nio.file.{Files, Path}
import java.util.UUID

import cats.effect.{Blocker, ContextShift, IO}
import ch.epfl.bluebrain.nexus.cli.Console
import ch.epfl.bluebrain.nexus.cli.dummies.TestConsole
import org.scalatest.OptionValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class OffsetSpec extends AnyWordSpecLike with Matchers with OptionValues {

  implicit protected val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val blocker: Blocker       = Blocker.liftExecutionContext(ExecutionContext.global)
  implicit private val console: Console[IO]   = TestConsole[IO].unsafeRunSync()

  abstract class Ctx {
    protected val uuid: UUID = UUID.randomUUID
    protected val file: Path = Files.createTempFile("offset", ".conf")
  }

  "An offset" should {
    "be loaded from configuration" in new Ctx {
      Files.writeString(file, uuid.toString)
      (for {
        offset <- Offset.load(file)
        _       = offset.value shouldEqual Offset(uuid)
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }

    "be loaded from configuration but failed to convert to UUID" in new Ctx {
      Files.writeString(file, "not-an-uuid")
      (for {
        offset <- Offset.load(file)
        _       = offset shouldEqual None
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }

    "be written to file" in new Ctx {
      val offset = Offset(UUID.randomUUID())
      (for {
        _ <- offset.write(file)
        _  = Files.readString(file) shouldEqual offset.value.toString
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }
  }
} 
Example 93
Source File: TestCliModule.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.dummies

import cats.Parallel
import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import ch.epfl.bluebrain.nexus.cli.clients.{EventStreamClient, ProjectClient, SparqlClient}
import ch.epfl.bluebrain.nexus.cli.sse.Event
import ch.epfl.bluebrain.nexus.cli.sse.OrgUuid.unsafe._
import ch.epfl.bluebrain.nexus.cli.sse.ProjectUuid.unsafe._
import ch.epfl.bluebrain.nexus.cli.{Cli, Console}
import distage.{ModuleDef, TagK}
import izumi.distage.model.definition.StandardAxis.Repo
import izumi.distage.model.recursive.LocatorRef

final class TestCliModule[F[_]: Parallel: ContextShift: Timer: ConcurrentEffect: TagK](events: List[Event])
    extends ModuleDef {
  make[TestConsole[F]].tagged(Repo.Dummy).fromEffect(TestConsole[F])
  make[Console[F]].tagged(Repo.Dummy).from { tc: TestConsole[F] => tc }

  make[ProjectClient[F]]
    .tagged(Repo.Dummy)
    .from(
      new TestProjectClient[F](
        // matches the uuids from the events.json file used for testing
        Map(
          (
            ("e6a84231-5df7-41cf-9d18-286892d119ec", "d576d282-1049-4a0c-9240-ff34b5e879f2"),
            ("tutorialnexus", "datamodels")
          ),
          (
            ("a605b71a-377d-4df3-95f8-923149d04106", "a7d69059-8d1d-4dac-800f-90b6b6ab94ee"),
            ("bbp", "atlas")
          )
        )
      )
    )

  make[SparqlClient[F]].tagged(Repo.Dummy).fromEffect { TestSparqlClient[F](events) }

  make[EventStreamClient[F]].tagged(Repo.Dummy).from { pc: ProjectClient[F] =>
    new TestEventStreamClient[F](events, pc)
  }

  make[Cli[F]].tagged(Repo.Dummy).from { locatorRef: LocatorRef => new Cli[F](Some(locatorRef)) }
}

object TestCliModule {

  final def apply[F[_]: Parallel: ContextShift: Timer: ConcurrentEffect: TagK](events: List[Event]): TestCliModule[F] =
    new TestCliModule[F](events)

} 
Example 94
Source File: AttributesCache.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.Path
import java.time.Clock

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.{ask, AskTimeoutException}
import akka.util.Timeout
import cats.effect.{ContextShift, Effect, IO}
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.FileAttributes
import ch.epfl.bluebrain.nexus.storage.StorageError.{InternalError, OperationTimedOut}
import ch.epfl.bluebrain.nexus.storage.attributes.AttributesCacheActor.Protocol._
import ch.epfl.bluebrain.nexus.storage.config.AppConfig.DigestConfig
import com.typesafe.scalalogging.Logger

import scala.util.control.NonFatal

trait AttributesCache[F[_]] {

  def get(filePath: Path): F[FileAttributes]

  def asyncComputePut(filePath: Path, algorithm: String): Unit
}

object AttributesCache {
  private[this] val logger = Logger[this.type]

  def apply[F[_], Source](implicit
      system: ActorSystem,
      clock: Clock,
      tm: Timeout,
      F: Effect[F],
      computation: AttributesComputation[F, Source],
      config: DigestConfig
  ): AttributesCache[F] =
    apply(system.actorOf(AttributesCacheActor.props(computation)))

  private[attributes] def apply[F[_]](
      underlying: ActorRef
  )(implicit system: ActorSystem, tm: Timeout, F: Effect[F]): AttributesCache[F] =
    new AttributesCache[F] {
      implicit private val contextShift: ContextShift[IO] = IO.contextShift(system.dispatcher)

      override def get(filePath: Path): F[FileAttributes] =
        IO.fromFuture(IO.shift(system.dispatcher) >> IO(underlying ? Get(filePath)))
          .to[F]
          .flatMap[FileAttributes] {
            case attributes: FileAttributes => F.pure(attributes)
            case other                      =>
              logger.error(s"Received unexpected reply from the file attributes cache: '$other'")
              F.raiseError(InternalError("Unexpected reply from the file attributes cache"))
          }
          .recoverWith {
            case _: AskTimeoutException =>
              F.raiseError(OperationTimedOut("reply from the file attributes cache timed out"))
            case NonFatal(th)           =>
              logger.error("Exception caught while exchanging messages with the file attributes cache", th)
              F.raiseError(InternalError("Exception caught while exchanging messages with the file attributes cache"))
          }

      override def asyncComputePut(filePath: Path, algorithm: String): Unit =
        underlying ! Compute(filePath)

    }
} 
Example 95
Source File: IamIdentitiesClient.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding.Get
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.unmarshalling.FromEntityUnmarshaller
import akka.util.ByteString
import cats.effect.{ContextShift, Effect, IO}
import cats.implicits._
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient.Identity._
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient._
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClientError.IdentitiesSerializationError
import ch.epfl.bluebrain.nexus.storage.config.IamClientConfig
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport.{DecodingFailures => AccDecodingFailures}
import io.circe.Decoder.Result
import io.circe.{Decoder, DecodingFailure, HCursor}

import scala.concurrent.ExecutionContext

class IamIdentitiesClient[F[_]](config: IamClientConfig)(implicit F: Effect[F], as: ActorSystem)
    extends JsonLdCirceSupport {

  private val um: FromEntityUnmarshaller[Caller]      = unmarshaller[Caller]
  implicit private val ec: ExecutionContext           = as.dispatcher
  implicit private val contextShift: ContextShift[IO] = IO.contextShift(ec)

  def apply()(implicit credentials: Option[AccessToken]): F[Caller] =
    credentials match {
      case Some(token) => execute(Get(config.identitiesIri.asAkka).addCredentials(OAuth2BearerToken(token.value)))
      case None        => F.pure(Caller.anonymous)
    }

  private def execute(req: HttpRequest): F[Caller] = {
    IO.fromFuture(IO(Http().singleRequest(req))).to[F].flatMap { resp =>
      if (resp.status.isSuccess())
        IO.fromFuture(IO(um(resp.entity))).to[F].recoverWith {
          case err: AccDecodingFailures => F.raiseError(IdentitiesSerializationError(err.getMessage))
          case err: Error               => F.raiseError(IdentitiesSerializationError(err.getMessage))
        }
      else
        IO.fromFuture(IO(resp.entity.dataBytes.runFold(ByteString(""))(_ ++ _).map(_.utf8String)))
          .to[F]
          .flatMap { err => F.raiseError(IamIdentitiesClientError.unsafe(resp.status, err)) }
    }
  }

}

object IamIdentitiesClient {

  final case class AccessToken(value: String)

  final case class Caller(identities: Set[Identity])

  object Caller {
    // NOTE: reconstruction; the original object also carries a circe
    // Decoder[Caller], elided here with the scaladoc lost in extraction.
    val anonymous: Caller = Caller(Set(Identity.Anonymous))
  }

  sealed trait Identity extends Product with Serializable

  object Identity {

    sealed trait Subject extends Identity

    final case object Anonymous extends Subject

    final case class User(subject: String, realm: String) extends Subject

    final case class Group(group: String, realm: String) extends Identity

    final case class Authenticated(realm: String) extends Identity

    private def decodeAnonymous(hc: HCursor): Result[Subject] =
      hc.get[String]("@type").flatMap {
        case "Anonymous" => Right(Anonymous)
        case _           => Left(DecodingFailure("Cannot decode Anonymous Identity", hc.history))
      }

    private def decodeUser(hc: HCursor): Result[Subject] =
      (hc.get[String]("subject"), hc.get[String]("realm")).mapN {
        case (subject, realm) => User(subject, realm)
      }

    private def decodeGroup(hc: HCursor): Result[Identity] =
      (hc.get[String]("group"), hc.get[String]("realm")).mapN {
        case (group, realm) => Group(group, realm)
      }

    private def decodeAuthenticated(hc: HCursor): Result[Identity] =
      hc.get[String]("realm").map(Authenticated)

    private val attempts =
      List[HCursor => Result[Identity]](decodeAnonymous, decodeUser, decodeGroup, decodeAuthenticated)

    implicit val identityDecoder: Decoder[Identity] =
      Decoder.instance { hc =>
        attempts.foldLeft(Left(DecodingFailure("Unexpected", hc.history)): Result[Identity]) {
          case (acc @ Right(_), _) => acc
          case (_, f)              => f(hc)
        }
      }
  }

} 
Example 96
Source File: DoobieSpec.scala    From cron4s   with Apache License 2.0 5 votes vote down vote up
package cron4s
package doobie

import cats.effect.{IO, ContextShift}

import _root_.doobie._
import _root_.doobie.implicits._
import _root_.doobie.util.invariant._

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import scala.concurrent.ExecutionContext

class DoobieSpec extends AnyFlatSpec with Matchers {
  implicit val contextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  val xa = Transactor.fromDriverManager[IO](
    "org.h2.Driver",
    "jdbc:h2:mem:refined;DB_CLOSE_DELAY=-1",
    "sa",
    ""
  )

  def insertMeeting(meeting: Meeting) = {
    val createTable = sql"""
       create table meeting(
        meeting_id BIGINT AUTO_INCREMENT PRIMARY KEY,
        subject VARCHAR(255) NOT NULL,
        description VARCHAR(255) NOT NULL,
        frequency VARCHAR(255) NOT NULL
      )
      """

    val insertRecord = sql"""
      insert into meeting(subject, description, frequency)
      values(${meeting.subject}, ${meeting.description}, ${meeting.frequency})
      """

    for {
      _  <- createTable.update.run
      id <- insertRecord.update.withUniqueGeneratedKeys[Long]("meeting_id")
    } yield MeetingId(id)
  }

  def loadMeeting(meetingId: MeetingId) =
    sql"select subject, description, frequency from meeting where meeting_id = $meetingId"
      .query[Meeting]
      .unique

  "Doobie" should "store and retrieve a cron expression as a member of a storable data structure" in {
    val standUpMeeting = Meeting(
      "Daily stand-up",
      "Daily team morning stand-up meeting",
      Cron.unsafeParse("0 0 10 ? * mon-fri")
    )

    val tx = for {
      meetingId <- insertMeeting(standUpMeeting)
      loaded    <- loadMeeting(meetingId)
    } yield loaded

    val loadedMeeting = tx.transact(xa).unsafeRunSync()
    loadedMeeting shouldBe standUpMeeting
  }

  it should "throw a SecondaryValidationFailed in case the cron expression is invalid" in {
    assertThrows[SecondaryValidationFailed[CronExpr]] {
      sql"select '0- 0 30 * * ?'".query[CronExpr].unique.transact(xa).unsafeRunSync()
    }
  }
} 
Example 97
Source File: package.scala    From pureconfig   with Mozilla Public License 2.0 5 votes vote down vote up
package pureconfig.module.catseffect

import scala.language.higherKinds
import scala.reflect.ClassTag

import cats.effect.{ Blocker, ContextShift, Sync }
import pureconfig.{ ConfigReader, ConfigSource, Derivation }
import pureconfig.module.catseffect

package object syntax {

  implicit class CatsEffectConfigSource(private val cs: ConfigSource) extends AnyVal {

    @inline
    final def loadF[F[_], A](blocker: Blocker)(implicit F: Sync[F], csf: ContextShift[F], reader: Derivation[ConfigReader[A]], ct: ClassTag[A]): F[A] =
      catseffect.loadF(cs, blocker)

    @deprecated("Use `cs.loadF[F, A](blocker)` instead", "0.12.3")
    def loadF[F[_], A](implicit F: Sync[F], reader: Derivation[ConfigReader[A]], ct: ClassTag[A]): F[A] =
      catseffect.loadF(cs)
  }
} 
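A usage sketch of the syntax above, assuming a hypothetical HttpConfig case class and an "http" path in the default configuration:

import scala.concurrent.ExecutionContext
import cats.effect.{Blocker, ContextShift, IO}
import pureconfig.ConfigSource
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._

final case class HttpConfig(host: String, port: Int) // hypothetical

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// loadF performs the blocking config parsing on the Blocker's pool, with the
// ContextShift[IO] instance above handling the shift back.
val cfg: IO[HttpConfig] =
  Blocker[IO].use(blocker => ConfigSource.default.at("http").loadF[IO, HttpConfig](blocker))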
Example 98
Source File: PostgresTransactor.scala    From ticket-booking-aecor   with Apache License 2.0 5 votes vote down vote up
package ru.pavkin.booking.common.postgres

import cats.effect.{Async, ContextShift, Resource}
import doobie.hikari.HikariTransactor
import doobie.util.ExecutionContexts
import ru.pavkin.booking.config.PostgresConfig

object PostgresTransactor {
  def transactor[F[_]](
    config: PostgresConfig
  )(implicit F: Async[F], C: ContextShift[F]): Resource[F, HikariTransactor[F]] =
    for {
      ce <- ExecutionContexts.fixedThreadPool[F](32)
      te <- ExecutionContexts.cachedThreadPool[F]
      tr <- HikariTransactor.newHikariTransactor[F](
             "org.postgresql.Driver",
             s"jdbc:postgresql://${config.contactPoints}:${config.port}/${config.database}",
             config.username,
             config.password,
             ce,
             te
           )
      _ <- Resource.liftF(tr.configure(ds => F.delay(ds.setAutoCommit(false))))
    } yield tr
} 
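A minimal usage sketch of the resource above, assuming PostgresConfig has exactly the fields referenced in the JDBC URL (the values are illustrative):

import scala.concurrent.ExecutionContext
import cats.effect.{ContextShift, IO}
import doobie.implicits._
import ru.pavkin.booking.config.PostgresConfig

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// hypothetical config values
val config = PostgresConfig(contactPoints = "localhost", port = 5432,
  database = "booking", username = "postgres", password = "postgres")

val ping: IO[Int] =
  PostgresTransactor.transactor[IO](config).use { xa =>
    sql"select 1".query[Int].unique.transact(xa)
  }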
Example 99
Source File: StaticLoggerBinder.scala    From odin   with Apache License 2.0 5 votes vote down vote up
package org.slf4j.impl

import cats.effect.{ConcurrentEffect, ContextShift, IO, Timer}
import io.odin._
import io.odin.slf4j.{BufferingLogger, OdinLoggerBinder}

import scala.concurrent.ExecutionContext

class StaticLoggerBinder extends OdinLoggerBinder[IO] {

  val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
  implicit val timer: Timer[IO] = IO.timer(ec)
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)
  implicit val F: ConcurrentEffect[IO] = IO.ioConcurrentEffect

  val loggers: PartialFunction[String, Logger[IO]] = {
    case Level.Trace.toString => new BufferingLogger[IO](Level.Trace)
    case Level.Debug.toString => new BufferingLogger[IO](Level.Debug)
    case Level.Info.toString  => new BufferingLogger[IO](Level.Info)
    case Level.Warn.toString  => new BufferingLogger[IO](Level.Warn)
    case Level.Error.toString => new BufferingLogger[IO](Level.Error)
    case _ =>
      new BufferingLogger[IO](Level.Trace)
  }
}

object StaticLoggerBinder extends StaticLoggerBinder {

  var REQUESTED_API_VERSION: String = "1.7"

  def getSingleton: StaticLoggerBinder = this

} 
Example 100
Source File: ConditionalLogger.scala    From odin   with Apache License 2.0 5 votes vote down vote up
package io.odin.extras.loggers

import cats.MonadError
import cats.effect.{Concurrent, ContextShift, ExitCase, Resource, Timer}
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.functor._
import cats.syntax.order._
import io.odin.loggers.DefaultLogger
import io.odin.{Level, Logger, LoggerMessage}
import monix.catnap.ConcurrentQueue
import monix.execution.{BufferCapacity, ChannelType}

final case class ConditionalLogger[F[_]: Timer] private (
    queue: ConcurrentQueue[F, LoggerMessage],
    inner: Logger[F],
    override val minLevel: Level
)(implicit F: MonadError[F, Throwable])
    extends DefaultLogger[F](minLevel) {

  def log(msg: LoggerMessage): F[Unit] =
    queue.tryOffer(msg).void

  private def drain(exitCase: ExitCase[Throwable]): F[Unit] = {
    val level = exitCase match {
      case ExitCase.Completed => inner.minLevel
      case _                  => minLevel
    }

    queue
      .drain(0, Int.MaxValue)
      .flatMap(msgs => inner.log(msgs.filter(_.level >= level).toList))
      .attempt
      .void
  }

}

object ConditionalLogger {

  
  def create[F[_]: Timer: Concurrent: ContextShift](
      inner: Logger[F],
      minLevelOnError: Level,
      maxBufferSize: Option[Int]
  ): Resource[F, Logger[F]] = {

    val queueCapacity = maxBufferSize match {
      case Some(value) => BufferCapacity.Bounded(value)
      case None        => BufferCapacity.Unbounded()
    }

    def acquire: F[ConditionalLogger[F]] =
      for {
        queue <- ConcurrentQueue.withConfig[F, LoggerMessage](queueCapacity, ChannelType.MPSC)
      } yield ConditionalLogger(queue, inner, minLevelOnError)

    def release(logger: ConditionalLogger[F], exitCase: ExitCase[Throwable]): F[Unit] =
      logger.drain(exitCase)

    Resource.makeCase(acquire)(release).widen
  }

} 
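The key design choice above is Resource.makeCase: the release action gets to observe how the scope ended, which is what lets drain pick the error-time minimum level. A standalone sketch of that shape:

import cats.effect.{ExitCase, IO, Resource}

// The release callback receives the ExitCase, so cleanup can branch on
// normal completion vs. error or cancelation, as ConditionalLogger does.
val scoped: Resource[IO, Unit] =
  Resource.makeCase(IO.unit) {
    case (_, ExitCase.Completed) => IO(println("completed: keep inner.minLevel"))
    case (_, _)                  => IO(println("failed or canceled: use minLevelOnError"))
  }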
Example 101
Source File: DynamoDbStreamsCatsIOClient.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
// Auto-Generated
package com.github.j5ik2o.reactive.aws.dynamodb.streams.cats

import cats.effect.{ ContextShift, IO }
import com.github.j5ik2o.reactive.aws.dynamodb.streams.{ DynamoDbStreamsAsyncClient, DynamoDbStreamsClient }
import software.amazon.awssdk.services.dynamodb.model._
import software.amazon.awssdk.services.dynamodb.streams.paginators.{ DescribeStreamPublisher, ListStreamsPublisher }

import scala.concurrent.{ ExecutionContext, Future }

object DynamoDbStreamsCatsIOClient {

  def apply(asyncClient: DynamoDbStreamsAsyncClient)(implicit ec: ExecutionContext): DynamoDbStreamsCatsIOClient =
    new DynamoDbStreamsCatsIOClient {
      override val executionContext: ExecutionContext     = ec
      override val underlying: DynamoDbStreamsAsyncClient = asyncClient
    }

}

trait DynamoDbStreamsCatsIOClient extends DynamoDbStreamsClient[IO] {

  val underlying: DynamoDbStreamsAsyncClient

  def executionContext: ExecutionContext
  implicit def cs: ContextShift[IO] = IO.contextShift(executionContext)

  override def describeStream(describeStreamRequest: DescribeStreamRequest): IO[DescribeStreamResponse] =
    IO.fromFuture {
      IO(underlying.describeStream(describeStreamRequest))
    }

  def describeStreamPaginator(describeStreamRequest: DescribeStreamRequest): DescribeStreamPublisher =
    underlying.describeStreamPaginator(describeStreamRequest)

  override def getRecords(getRecordsRequest: GetRecordsRequest): IO[GetRecordsResponse] =
    IO.fromFuture {
      IO(underlying.getRecords(getRecordsRequest))
    }

  override def getShardIterator(getShardIteratorRequest: GetShardIteratorRequest): IO[GetShardIteratorResponse] =
    IO.fromFuture {
      IO(underlying.getShardIterator(getShardIteratorRequest))
    }

  override def listStreams(listStreamsRequest: ListStreamsRequest): IO[ListStreamsResponse] =
    IO.fromFuture {
      IO(underlying.listStreams(listStreamsRequest))
    }

  override def listStreams(): IO[ListStreamsResponse] =
    IO.fromFuture {
      IO(underlying.listStreams())
    }

  def listStreamsPaginator(): ListStreamsPublisher =
    underlying.listStreamsPaginator()

  def listStreamsPaginator(listStreamsRequest: ListStreamsRequest): ListStreamsPublisher =
    underlying.listStreamsPaginator(listStreamsRequest)

} 
Example 102
Source File: ProducerOf.scala    From skafka   with MIT License 5 votes vote down vote up
package com.evolutiongaming.skafka.producer

import cats.effect.{Bracket, ContextShift, Effect, Resource}
import cats.{Defer, Monad, ~>}
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.ExecutionContext

trait ProducerOf[F[_]] {

  def apply(config: ProducerConfig): Resource[F, Producer[F]]
}

object ProducerOf {

  def apply[F[_] : Effect : ContextShift : MeasureDuration](
    executorBlocking: ExecutionContext,
    metrics: Option[ProducerMetrics[F]] = None
  ): ProducerOf[F] = new ProducerOf[F] {

    def apply(config: ProducerConfig) = {
      for {
        producer <- Producer.of[F](config, executorBlocking)
      } yield {
        metrics.fold(producer)(producer.withMetrics[Throwable])
      }
    }
  }


  implicit class ProducerOfOps[F[_]](val self: ProducerOf[F]) extends AnyVal {

    def mapK[G[_] : Monad : Defer](
      fg: F ~> G,
      gf: G ~> F)(implicit
      B: Bracket[F, Throwable]
    ): ProducerOf[G] = new ProducerOf[G] {

      def apply(config: ProducerConfig) = {
        for {
          a <- self(config).mapK(fg)
        } yield {
          a.mapK(fg, gf)
        }
      }
    }
  }
} 
Example 103
Source File: ConsumerOf.scala    From skafka   with MIT License 5 votes vote down vote up
package com.evolutiongaming.skafka.consumer

import cats.effect.{Bracket, Concurrent, ContextShift, Resource}
import cats.{Applicative, Defer, ~>}
import com.evolutiongaming.catshelper.{ToFuture, ToTry}
import com.evolutiongaming.skafka.FromBytes
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.ExecutionContext

trait ConsumerOf[F[_]] {

  def apply[K, V](
    config: ConsumerConfig)(implicit
    fromBytesK: FromBytes[F, K],
    fromBytesV: FromBytes[F, V]
  ): Resource[F, Consumer[F, K, V]]
}

object ConsumerOf {

  def apply[F[_] : Concurrent : ContextShift : ToTry : ToFuture : MeasureDuration](
    executorBlocking: ExecutionContext,
    metrics: Option[ConsumerMetrics[F]] = None
  ): ConsumerOf[F] = new ConsumerOf[F] {

    def apply[K, V](
      config: ConsumerConfig)(implicit
      fromBytesK: FromBytes[F, K],
      fromBytesV: FromBytes[F, V]
    ) = {
      for {
        consumer <- Consumer.of[F, K, V](config, executorBlocking)
      } yield {
        metrics.fold(consumer)(consumer.withMetrics[Throwable])
      }
    }
  }


  implicit class ConsumerOfOps[F[_]](val self: ConsumerOf[F]) extends AnyVal {

    def mapK[G[_] : Applicative : Defer](
      fg: F ~> G,
      gf: G ~> F)(implicit
      B: Bracket[F, Throwable]
    ): ConsumerOf[G] = new ConsumerOf[G] {

      def apply[K, V](
        config: ConsumerConfig)(implicit
        fromBytesK: FromBytes[G, K],
        fromBytesV: FromBytes[G, V]
      ) = {
        for {
          a <- self[K, V](config)(fromBytesK.mapK(gf), fromBytesV.mapK(gf)).mapK(fg)
        } yield {
          a.mapK(fg, gf)
        }
      }
    }
  }
} 
Example 104
Source File: IOSuite.scala    From skafka   with MIT License 5 votes vote down vote up
package com.evolutiongaming.skafka

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 10.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallelIO: Parallel[IO]             = IO.ioParallel
  implicit val fromFutureIO: FromFuture[IO]         = FromFuture.lift[IO]
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock[IO](Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
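A hypothetical test built on the helpers above; run (from IOOps) applies the 10-second timeout and converts the IO into the Future that scalatest's async styles expect:

import scala.concurrent.duration._
import cats.effect.IO
import com.evolutiongaming.skafka.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite

class TimerSpec extends AsyncFunSuite {
  test("timer fires") {
    IO.sleep(10.millis).run() // uses timerIO and contextShiftIO from IOSuite
  }
}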
Example 105
Source File: DeterministicIOTestSuite.scala    From cats-effect-testing   with Apache License 2.0 5 votes vote down vote up
package cats.effect.testing.minitest

import scala.concurrent.ExecutionContext

import cats.effect.{ContextShift, IO, Timer}
import cats.effect.laws.util.TestContext
import scala.concurrent.duration._

import minitest.api.{DefaultExecutionContext, TestSpec}

abstract class DeterministicIOTestSuite extends BaseIOTestSuite[TestContext] {
  override protected final def makeExecutionContext(): TestContext = TestContext()


  override protected[effect] implicit def suiteEc: ExecutionContext = DefaultExecutionContext

  override final implicit def ioContextShift: ContextShift[IO] =
    executionContext.contextShift[IO](IO.ioEffect)
  override final implicit def ioTimer: Timer[IO] = executionContext.timer[IO](IO.ioEffect)


  override protected[effect] def mkSpec(name: String, ec: TestContext, io: => IO[Unit]): TestSpec[Unit, Unit] =
    TestSpec.sync(name, _ => {
      val f = io.unsafeToFuture()
      ec.tick(365.days)
      f.value match {
        case Some(value) => value.get
        case None => throw new RuntimeException(
          s"The IO in ${this.getClass.getName}.$name did not terminate.\n" +
          "It's possible that you are using a ContextShift that is backed by another ExecutionContext, or " +
          "the test code is waiting indefinitely."
        )
      }
    })
} 
Example 106
Source File: IOTestSuite.scala    From cats-effect-testing   with Apache License 2.0 5 votes vote down vote up
package cats.effect.testing.utest

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

import cats.effect.{ContextShift, IO, Timer}
import utest._


abstract class IOTestSuite extends TestSuite {
  protected def makeExecutionContext(): ExecutionContext = ExecutionContext.global
  protected def timeout: FiniteDuration = 10.seconds
  protected def allowNonIOTests: Boolean = false

  protected lazy val executionContext: ExecutionContext = makeExecutionContext()

  implicit def ioContextShift: ContextShift[IO] = IO.contextShift(executionContext)
  implicit def ioTimer: Timer[IO] = IO.timer(executionContext)

  override def utestWrap(path: Seq[String], runBody: => Future[Any])(implicit ec: ExecutionContext): Future[Any] = {
    // Shadow the parameter EC with our EC
    implicit val ec: ExecutionContext = this.executionContext
    runBody.flatMap {
      case io: IO[Any] => io.timeout(timeout).unsafeToFuture()
      case other if allowNonIOTests => Future.successful(other)
      case other => throw new RuntimeException(s"Test body must return an IO value. Got $other")
    }
  }
} 
Example 107
Source File: DeterministicIOTestSuite.scala    From cats-effect-testing   with Apache License 2.0 5 votes vote down vote up
package cats.effect.testing.utest

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

import cats.effect.{ContextShift, IO, Timer}
import cats.effect.laws.util.TestContext
import utest.TestSuite


abstract class DeterministicIOTestSuite extends TestSuite {
  protected val testContext: TestContext = TestContext()
  protected def allowNonIOTests: Boolean = false

  implicit def ioContextShift: ContextShift[IO] = testContext.contextShift(IO.ioEffect)
  implicit def ioTimer: Timer[IO] = testContext.timer(IO.ioEffect)

  override def utestWrap(path: Seq[String], runBody: => Future[Any])(implicit ec: ExecutionContext): Future[Any] = {
    runBody.flatMap {
      case io: IO[Any] =>
        val f = io.unsafeToFuture()
        testContext.tick(365.days)
        assert(testContext.state.tasks.isEmpty)
        f.value match {
          case Some(_) => f
          case None => throw new RuntimeException(
            s"The IO in ${path.mkString(".")} did not terminate.\n" +
            "It's possible that you are using a ContextShift that is backed by another ExecutionContext, or " +
            "the test code is waiting indefinitely."
          )
        }
      case other if allowNonIOTests => Future.successful(other)
      case other =>
        throw new RuntimeException(s"Test body must return an IO value. Got $other")
    }(new ExecutionContext {
      def execute(runnable: Runnable): Unit = runnable.run()
      def reportFailure(cause: Throwable): Unit = throw cause
    })
  }
} 
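Both deterministic suites above rely on cats-effect's TestContext: no task runs until the context is ticked, so time-based code is tested without real waiting. A minimal standalone sketch, wiring the instances the same way the suites do:

import scala.concurrent.duration._
import cats.effect.{ContextShift, IO, Timer}
import cats.effect.laws.util.TestContext

val ctx = TestContext()
implicit val cs: ContextShift[IO] = ctx.contextShift[IO](IO.ioEffect)
implicit val timer: Timer[IO]     = ctx.timer[IO](IO.ioEffect)

val f = IO.sleep(1.hour).map(_ => 42).unsafeToFuture()
ctx.tick(59.minutes) // f.value == None: the sleep has not elapsed yet
ctx.tick(1.minute)   // now f.value == Some(Success(42))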
Example 108
Source File: Settings.scala    From vinyldns   with Apache License 2.0 5 votes vote down vote up
package controllers

import java.net.URI

import cats.effect.{Blocker, ContextShift, IO}
import cats.implicits._
import com.typesafe.config.{Config, ConfigFactory}
import play.api.{ConfigLoader, Configuration}
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.repository.DataStoreConfig

import scala.collection.JavaConverters._
import scala.concurrent.duration._

// $COVERAGE-OFF$
class Settings(private val config: Configuration) {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val ldapUser: String = config.get[String]("LDAP.user")
  val ldapPwd: String = config.get[String]("LDAP.password")
  val ldapDomain: String = config.get[String]("LDAP.domain")

  val ldapSearchBase: List[LdapSearchDomain] = config.get[List[LdapSearchDomain]]("LDAP.searchBase")
  val ldapCtxFactory: String = config.get[String]("LDAP.context.initialContextFactory")
  val ldapSecurityAuthentication: String = config.get[String]("LDAP.context.securityAuthentication")
  val ldapProviderUrl: URI = new URI(config.get[String]("LDAP.context.providerUrl"))
  val ldapUserNameAttribute: String =
    config.getOptional[String]("LDAP.userNameAttribute").getOrElse("sAMAccountName")

  val ldapSyncEnabled: Boolean =
    config.getOptional[Boolean]("LDAP.user-sync.enabled").getOrElse(false)
  val ldapSyncPollingInterval: FiniteDuration = config
    .getOptional[Int]("LDAP.user-sync.hours-polling-interval")
    .getOrElse(24)
    .hours

  val portalTestLogin: Boolean = config.getOptional[Boolean]("portal.test_login").getOrElse(false)

  val dataStoreConfigs: IO[List[DataStoreConfig]] =
    Blocker[IO].use { blocker =>
      ConfigSource
        .fromConfig(config.underlying)
        .at("data-stores")
        .loadF[IO, List[String]](blocker)
        .flatMap { lst =>
          lst
            .map(
              ConfigSource.fromConfig(config.underlying).at(_).loadF[IO, DataStoreConfig](blocker)
            )
            .parSequence
        }
    }

  val cryptoConfig = IO(config.get[Config]("crypto"))

  implicit def ldapSearchDomainLoader: ConfigLoader[List[LdapSearchDomain]] =
    new ConfigLoader[List[LdapSearchDomain]] {
      def load(config: Config, path: String): List[LdapSearchDomain] = {
        val domains = config.getConfigList(path).asScala.map { domainConfig ⇒
          val org = domainConfig.getString("organization")
          val domain = domainConfig.getString("domainName")
          LdapSearchDomain(org, domain)
        }
        domains.toList
      }
    }
}
// $COVERAGE-ON$
object Settings extends Settings(Configuration(ConfigFactory.load())) 
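Worth noting in Settings above: parSequence needs a Parallel[IO], and in cats-effect 2 that instance is derived from the ContextShift[IO] declared at the top of the class. The same dependency in isolation:

import scala.concurrent.ExecutionContext
import cats.effect.{ContextShift, IO}
import cats.implicits._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// IO's Parallel instance (and hence parSequence) is available only because a
// ContextShift[IO] is in implicit scope.
val loaded: IO[List[Int]] = List(IO(1), IO(2), IO(3)).parSequence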
Example 109
Source File: UserAccountAccessor.scala    From vinyldns   with Apache License 2.0 5 votes vote down vote up
package controllers

import cats.effect.{ContextShift, IO}
import cats.implicits._
import javax.inject.{Inject, Singleton}
import org.joda.time.DateTime
import vinyldns.core.domain.membership._

@Singleton
class UserAccountAccessor @Inject() (users: UserRepository, changes: UserChangeRepository) {

  implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global)

  
  def get(identifier: String): IO[Option[User]] =
    users
      .getUser(identifier)
      .flatMap {
        case None => users.getUserByName(identifier)
        case found => IO(found)
      }

  def create(user: User): IO[User] =
    for {
      _ <- users.save(user)
      _ <- changes.save(UserChange.CreateUser(user, "system", DateTime.now))
    } yield user

  def update(user: User, oldUser: User): IO[User] =
    for {
      _ <- users.save(user)
      _ <- changes.save(UserChange.UpdateUser(user, "system", DateTime.now, oldUser))
    } yield user

  def getUserByKey(key: String): IO[Option[User]] =
    users.getUserByAccessKey(key)

  def getAllUsers: IO[List[User]] =
    users.getAllUsers

  def lockUsers(usersToLock: List[User]): IO[List[User]] = {
    val currentTime = DateTime.now
    for {
      lockedUsers <- users.save(usersToLock.map(_.copy(lockStatus = LockStatus.Locked)))
      _ <- usersToLock
        .zip(lockedUsers)
        .map {
          case (oldUser, newUser) =>
            changes.save(UserChange.UpdateUser(newUser, "system", currentTime, oldUser))
        }
        .parSequence
    } yield lockedUsers
  }
} 
Example 110
Source File: APIMetrics.scala    From vinyldns   with Apache License 2.0 5 votes vote down vote up
package vinyldns.api.metrics
import java.util.concurrent.TimeUnit

import cats.effect.{Blocker, ContextShift, IO}
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.{Metric, MetricFilter, ScheduledReporter, Slf4jReporter}
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.VinylDNSMetrics

final case class MemoryMetricsSettings(logEnabled: Boolean, logSeconds: Int)
final case class APIMetricsSettings(memory: MemoryMetricsSettings)

object APIMetrics {
  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  // Output all memory metrics to the log, do not start unless configured
  private val logReporter = Slf4jReporter
    .forRegistry(VinylDNSMetrics.metricsRegistry)
    .filter(new MetricFilter {
      def matches(name: String, metric: Metric): Boolean =
        name.startsWith("memory")
    })
    .withLoggingLevel(LoggingLevel.INFO)
    .outputTo(LoggerFactory.getLogger("MemStats"))
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .build()

  def initialize(
      settings: APIMetricsSettings,
      reporter: ScheduledReporter = logReporter
  ): IO[Unit] = IO {
    if (settings.memory.logEnabled) {
      reporter.start(settings.memory.logSeconds, TimeUnit.SECONDS)
    }
  }

  def loadSettings(config: Config): IO[APIMetricsSettings] =
    Blocker[IO].use(
      ConfigSource.fromConfig(config).loadF[IO, APIMetricsSettings](_)
    )
} 
Example 111
Source File: EmailNotifierProvider.scala    From vinyldns   with Apache License 2.0 5 votes vote down vote up
package vinyldns.api.notifier.email

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import javax.mail.Session

class EmailNotifierProvider extends NotifierProvider {
  import EmailNotifierConfig._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      emailConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, EmailNotifierConfig](_)
      )
      session <- createSession(emailConfig)
    } yield new EmailNotifier(emailConfig, session, userRepository)

  def createSession(config: EmailNotifierConfig): IO[Session] = IO {
    Session.getInstance(config.smtp)
  }
} 
Example 112
Source File: SnsNotifierProvider.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.api.notifier.sns

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import com.amazonaws.services.sns.AmazonSNS
import org.slf4j.LoggerFactory
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.auth.AWSStaticCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials

class SnsNotifierProvider extends NotifierProvider {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  private val logger = LoggerFactory.getLogger(classOf[SnsNotifierProvider])

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      snsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SnsNotifierConfig](_)
      )
      client <- createClient(snsConfig)
    } yield new SnsNotifier(snsConfig, client)

  def createClient(config: SnsNotifierConfig): IO[AmazonSNS] = IO {
    logger.error(
      "Setting up sns notifier client with settings: " +
        s"service endpoint: ${config.serviceEndpoint}; " +
        s"signing region: ${config.signingRegion}; " +
        s"topic name: ${config.topicArn}"
    )
    AmazonSNSClientBuilder.standard
      .withEndpointConfiguration(
        new EndpointConfiguration(config.serviceEndpoint, config.signingRegion)
      )
      .withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(config.accessKey, config.secretKey)
        )
      )
      .build()
  }

} 
Example 113
Source File: StatusRoutingSpec.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.api.route

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import cats.effect.{ContextShift, IO}
import fs2.concurrent.SignallingRef
import org.scalatest._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.mockito.MockitoSugar

class StatusRoutingSpec
    extends AnyWordSpec
    with ScalatestRouteTest
    with StatusRoute
    with OneInstancePerTest
    with VinylDNSJsonProtocol
    with BeforeAndAfterEach
    with MockitoSugar
    with Matchers {

  def actorRefFactory: ActorSystem = system

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val processingDisabled: SignallingRef[IO, Boolean] =
    fs2.concurrent.SignallingRef[IO, Boolean](false).unsafeRunSync()

  "GET /status" should {
    "return the current status of true" in {
      Get("/status") ~> statusRoute ~> check {
        response.status shouldBe StatusCodes.OK
        val resultStatus = responseAs[CurrentStatus]
        resultStatus.processingDisabled shouldBe false
        resultStatus.color shouldBe "blue"
        resultStatus.keyName shouldBe "vinyldns."
        resultStatus.version shouldBe "unset"
      }
    }
  }

  "POST /status" should {
    "disable processing" in {
      Post("/status?processingDisabled=true") ~> statusRoute ~> check {
        response.status shouldBe StatusCodes.OK
        val resultStatus = responseAs[CurrentStatus]
        resultStatus.processingDisabled shouldBe true
      }
    }

    "enable processing" in {
      Post("/status?processingDisabled=false") ~> statusRoute ~> check {
        response.status shouldBe StatusCodes.OK
        val resultStatus = responseAs[CurrentStatus]
        resultStatus.processingDisabled shouldBe false

        // remember, the signal is the opposite of intent
        processingDisabled.get.unsafeRunSync() shouldBe false
      }
    }
  }
} 
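
The SignallingRef under test is worth seeing in isolation; a minimal sketch of the get/set round trip the route relies on:

import cats.effect.{ContextShift, IO}
import fs2.concurrent.SignallingRef

object ProcessingSignalSketch extends App {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val roundTrip: IO[Boolean] = for {
    signal <- SignallingRef[IO, Boolean](false) // processing enabled at start
    _      <- signal.set(true)                  // POST /status?processingDisabled=true flips this
    status <- signal.get
  } yield status

  assert(roundTrip.unsafeRunSync())
}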
Example 114
Source File: SqsMessageQueueProvider.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.sqs.queue
import cats.effect.IO
import cats.implicits._
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.model.QueueDoesNotExistException
import com.amazonaws.services.sqs.{AmazonSQSAsync, AmazonSQSAsyncClientBuilder}
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.Blocker
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}

import scala.util.matching.Regex
import cats.effect.ContextShift

class SqsMessageQueueProvider extends MessageQueueProvider {
  import SqsMessageQueueProvider._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      settingsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SqsMessageQueueSettings](_)
      )
      _ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
      client <- setupClient(settingsConfig)
      queueUrl <- setupQueue(client, settingsConfig.queueName)
      _ <- IO(logger.error(s"Queue URL: $queueUrl\n"))
    } yield new SqsMessageQueue(queueUrl, client)

  def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {
    // Queue names must be 1-80 alphanumeric, hyphen or underscore characters
    val validQueueNameRegex: Regex = """^([\w\-]{1,80})$""".r

    validQueueNameRegex
      .findFirstIn(queueName)
      .map(Right(_))
      .getOrElse(Left(InvalidQueueName(queueName)))
  }

  def setupClient(sqsMessageQueueSettings: SqsMessageQueueSettings): IO[AmazonSQSAsync] =
    IO {
      logger.error(
        s"Setting up queue client with settings: " +
          s"service endpoint: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"signing region: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"queue name: ${sqsMessageQueueSettings.queueName}"
      )
      AmazonSQSAsyncClientBuilder
        .standard()
        .withEndpointConfiguration(
          new EndpointConfiguration(
            sqsMessageQueueSettings.serviceEndpoint,
            sqsMessageQueueSettings.signingRegion
          )
        )
        .withCredentials(
          new AWSStaticCredentialsProvider(
            new BasicAWSCredentials(
              sqsMessageQueueSettings.accessKey,
              sqsMessageQueueSettings.secretKey
            )
          )
        )
        .build()
    }

  def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
    // Create queue if it doesn't exist
    IO {
      logger.error(s"Setting up queue with name [$queueName]")
      client.getQueueUrl(queueName).getQueueUrl
    }.recoverWith {
      case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)
    }
}

object SqsMessageQueueProvider {
  final case class InvalidQueueName(queueName: String)
      extends Throwable(
        s"Invalid queue name: $queueName. Must be 1-80 alphanumeric, hyphen or underscore characters. FIFO queues " +
          "(queue names ending in \".fifo\") are not supported."
      )

  private val logger = LoggerFactory.getLogger(classOf[SqsMessageQueueProvider])
} 
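
The queue-name rule is easy to verify in isolation; a quick sketch, assuming the provider above is on the classpath:

import vinyldns.sqs.queue.SqsMessageQueueProvider

object QueueNameChecksSketch extends App {
  val provider = new SqsMessageQueueProvider
  assert(provider.validateQueueName("vinyldns-queue").isRight) // hyphens and word characters pass
  assert(provider.validateQueueName("my.queue.fifo").isLeft)   // dots (and so ".fifo" names) are rejected
  assert(provider.validateQueueName("a" * 81).isLeft)          // more than 80 characters is rejected
}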
Example 115
Source File: DynamoDBDataStoreProviderIntegrationSpec.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.dynamodb.repository

import cats.effect.{ContextShift, IO}
import cats.implicits._
import com.amazonaws.services.dynamodbv2.model.DeleteTableRequest
import com.typesafe.config.{Config, ConfigFactory}
import vinyldns.core.crypto.{CryptoAlgebra, NoOpCrypto}
import vinyldns.core.domain.batch.BatchChangeRepository
import vinyldns.core.domain.membership._
import vinyldns.core.domain.record.{RecordChangeRepository, RecordSetRepository}
import vinyldns.core.domain.zone.{ZoneChangeRepository, ZoneRepository}
import vinyldns.core.repository.{DataStore, DataStoreConfig, LoadedDataStore}
import vinyldns.core.repository.RepositoryName._
import pureconfig._
import pureconfig.generic.auto._

class DynamoDBDataStoreProviderIntegrationSpec extends DynamoDBIntegrationSpec {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  val config: Config = ConfigFactory.load()
  val dynamoDBConfig: DataStoreConfig =
    ConfigSource.fromConfig(config).at("dynamodb").loadOrThrow[DataStoreConfig]

  val provider: DynamoDBDataStoreProvider = new DynamoDBDataStoreProvider()
  val crypto: CryptoAlgebra = new NoOpCrypto()

  logger.info("Loading all dynamodb tables in DynamoDBDataStoreProviderSpec")
  val providerLoad: LoadedDataStore = provider.load(dynamoDBConfig, crypto).unsafeRunSync()
  val dataStore: DataStore = providerLoad.dataStore
  logger.info("DynamoDBDataStoreProviderSpec load complete")

  def setup(): Unit = ()

  def tearDown(): Unit = {
    val deletes = dynamoDBConfig.repositories.configMap.map {
      case (_, config) => {
        val asDynamo = ConfigSource.fromConfig(config).loadOrThrow[DynamoDBRepositorySettings]
        val request = new DeleteTableRequest().withTableName(asDynamo.tableName)
        testDynamoDBHelper.deleteTable(request)
      }
    }
    logger.info("Deleting all tables created by provider in DynamoDBDataStoreProviderSpec")
    deletes.toList.parSequence.unsafeRunSync()
    logger.info("DynamoDBDataStoreProviderSpec delete complete")
  }

  "DynamoDBDataStoreProvider" should {
    "properly load configured repos" in {
      dataStore.get[GroupRepository](group) shouldBe defined
      dataStore.get[MembershipRepository](membership) shouldBe defined
      dataStore.get[GroupChangeRepository](groupChange) shouldBe defined
      dataStore.get[RecordChangeRepository](recordChange) shouldBe defined
      dataStore.get[ZoneChangeRepository](zoneChange) shouldBe defined
    }
    "not load configured off repos" in {
      dataStore.get[ZoneRepository](zone) shouldBe empty
      dataStore.get[BatchChangeRepository](batchChange) shouldBe empty
      dataStore.get[RecordSetRepository](recordSet) shouldBe empty
    }
    "validate a loaded repo works" in {
      val testGroup = Group(
        "provider-load-test-group-name",
        "[email protected]",
        Some("some description"),
        "testGroupId",
        adminUserIds = Set("testUserId"),
        memberIds = Set("testUserId")
      )
      val groupRepo = dataStore.get[GroupRepository](group)

      val save = groupRepo.map(_.save(testGroup)).sequence[IO, Group]
      save.unsafeRunSync() shouldBe Some(testGroup)

      val get = groupRepo.map(_.getGroup(testGroup.id)).sequence[IO, Option[Group]]
      get.unsafeRunSync().flatten shouldBe Some(testGroup)
    }
    "include a health check IO" in {
      providerLoad.healthCheck.unsafeRunSync() shouldBe ().asRight
    }
  }
} 
Example 116
Source File: MySqlMessageQueueProvider.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.mysql.queue

import cats.effect.IO
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import scalikejdbc.{ConnectionPool, DataSourceConnectionPool}
import scalikejdbc.config.DBs
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}
import vinyldns.mysql.{HikariCloser, MySqlConnectionConfig, MySqlDataSourceSettings}
import vinyldns.mysql.MySqlConnector._
import cats.effect.ContextShift
import cats.effect.Blocker

class MySqlMessageQueueProvider extends MessageQueueProvider {

  private val logger = LoggerFactory.getLogger(classOf[MySqlMessageQueueProvider])

  implicit val mySqlPropertiesReader: ConfigReader[Map[String, AnyRef]] =
    MySqlConnectionConfig.mySqlPropertiesReader

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      connectionSettings <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, MySqlConnectionConfig](_)
      )
      _ <- runDBMigrations(connectionSettings)
      _ <- setupQueueConnection(connectionSettings)
    } yield new MySqlMessageQueue(config.maxRetries)

  def setupQueueConnection(config: MySqlConnectionConfig): IO[Unit] = {
    val queueConnectionSettings = MySqlDataSourceSettings(config, "mysqlQueuePool")

    getDataSource(queueConnectionSettings).map { dataSource =>
      logger.error("configuring connection pool for queue")

      // note this is being called 2x in the case you use the mysql datastores and
      // loader. That should be ok
      DBs.loadGlobalSettings()

      // Configure the connection pool
      ConnectionPool.add(
        MySqlMessageQueue.QUEUE_CONNECTION_NAME,
        new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
      )

      logger.error("queue connection pool init complete")
    }
  }

} 
Example 117
Source File: HealthService.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.core.health

import cats.effect.{ContextShift, IO}
import cats.implicits._
import org.slf4j.LoggerFactory
import vinyldns.core.health.HealthCheck.{HealthCheck, HealthCheckError}

class HealthService(healthChecks: List[HealthCheck]) {

  private val logger = LoggerFactory.getLogger(classOf[HealthService])

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def checkHealth(): IO[List[HealthCheckError]] =
    healthChecks.parSequence
      .map {
        _.collect {
          case Left(err) =>
            logger.error(s"Health Check Failure: ${err.message}")
            err
        }
      }
} 
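
A hypothetical caller for the service above: run every check in parallel and abort startup when any of them reports an error.

import cats.effect.IO
import vinyldns.core.health.HealthService

object StartupHealthSketch {
  def ensureHealthy(health: HealthService): IO[Unit] =
    health.checkHealth().flatMap {
      case Nil    => IO.unit
      case errors => IO.raiseError(new RuntimeException(s"${errors.size} health check(s) failed"))
    }
}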
Example 118
Source File: AllNotifiers.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.core.notifier

import cats.effect.{ContextShift, IO}
import cats.implicits._
import org.slf4j.LoggerFactory
import vinyldns.core.route.Monitored

final case class AllNotifiers(notifiers: List[Notifier])(implicit val cs: ContextShift[IO])
    extends Monitored {

  private val logger = LoggerFactory.getLogger("AllNotifiers")

  def notify(notification: Notification[_]): IO[Unit] =
    for {
      _ <- notifiers.parTraverse(notify(_, notification))
    } yield ()

  def notify(notifier: Notifier, notification: Notification[_]): IO[Unit] =
    monitor(notifier.getClass.getSimpleName) {
      notifier.notify(notification).handleErrorWith { e =>
        IO {
          logger.error(s"Notifier ${notifier.getClass.getSimpleName} failed.", e)
        }
      }
    }
} 
Example 119
Source File: NotifierLoader.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.core.notifier
import vinyldns.core.domain.membership.UserRepository
import cats.effect.IO
import cats.implicits._
import cats.effect.ContextShift

object NotifierLoader {

  def loadAll(configs: List[NotifierConfig], userRepository: UserRepository)(
      implicit cs: ContextShift[IO]
  ): IO[AllNotifiers] =
    for {
      notifiers <- configs.parTraverse(load(_, userRepository))
    } yield AllNotifiers(notifiers)

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      provider <- IO(
        Class
          .forName(config.className)
          .getDeclaredConstructor()
          .newInstance()
          .asInstanceOf[NotifierProvider]
      )
      notifier <- provider.load(config, userRepository)
    } yield notifier

} 
Example 120
Source File: AllNotifiersSpec.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.core.notifier

import cats.scalatest.{EitherMatchers, EitherValues, ValidatedMatchers}
import org.scalatestplus.mockito.MockitoSugar
import org.mockito.Mockito._
import cats.effect.IO
import org.scalatest.BeforeAndAfterEach
import cats.effect.ContextShift
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class AllNotifiersSpec
    extends AnyWordSpec
    with Matchers
    with MockitoSugar
    with EitherValues
    with EitherMatchers
    with ValidatedMatchers
    with BeforeAndAfterEach {

  implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global)

  val mockNotifiers = List.fill(3)(mock[Notifier])

  val notification = Notification("anything")

  override def beforeEach(): Unit =
    mockNotifiers.foreach { mock =>
      reset(mock)
      when(mock.notify(notification)).thenReturn(IO.unit)
    }

  "notifier" should {
    "notify all contained notifiers" in {

      val notifier = AllNotifiers(mockNotifiers)

      notifier.notify(notification).unsafeRunSync()

      mockNotifiers.foreach(verify(_).notify(notification))
    }

    "suppress errors from notifiers" in {
      val notifier = AllNotifiers(mockNotifiers)

      when(mockNotifiers(2).notify(notification)).thenReturn(IO.raiseError(new Exception("fail")))

      notifier.notify(notification).unsafeRunSync()

      mockNotifiers.foreach(verify(_).notify(notification))
    }
  }

} 
Example 121
Source File: TaskSchedulerSpec.scala    From vinyldns   with Apache License 2.0 5 votes
package vinyldns.core.task
import cats.effect.{ContextShift, IO, Timer}
import org.mockito.Mockito
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterEach

import scala.concurrent.duration._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class TaskSchedulerSpec
    extends AnyWordSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterEach {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  private implicit val timer: Timer[IO] = IO.timer(scala.concurrent.ExecutionContext.global)

  private val mockRepo = mock[TaskRepository]

  class TestTask(
      val name: String,
      val timeout: FiniteDuration,
      val runEvery: FiniteDuration,
      val checkInterval: FiniteDuration,
      testResult: IO[Unit] = IO.unit
  ) extends Task {
    def run(): IO[Unit] = testResult
  }

  override def beforeEach() = Mockito.reset(mockRepo)

  "TaskScheduler" should {
    "run a scheduled task" in {
      val task = new TestTask("test", 5.seconds, 500.millis, 500.millis)
      val spied = spy(task)
      doReturn(IO.unit).when(mockRepo).saveTask(task.name)
      doReturn(IO.pure(true)).when(mockRepo).claimTask(task.name, task.timeout, task.runEvery)
      doReturn(IO.unit).when(mockRepo).releaseTask(task.name)

      TaskScheduler.schedule(spied, mockRepo).take(1).compile.drain.unsafeRunSync()

      // We run twice because we run once on start up
      verify(spied, times(2)).run()
      verify(mockRepo, times(2)).claimTask(task.name, task.timeout, task.runEvery)
      verify(mockRepo, times(2)).releaseTask(task.name)
    }

    "release the task even on error" in {
      val task =
        new TestTask(
          "test",
          5.seconds,
          500.millis,
          500.millis,
          IO.raiseError(new RuntimeException("fail"))
        )
      doReturn(IO.unit).when(mockRepo).saveTask(task.name)
      doReturn(IO.pure(true)).when(mockRepo).claimTask(task.name, task.timeout, task.runEvery)
      doReturn(IO.unit).when(mockRepo).releaseTask(task.name)

      TaskScheduler.schedule(task, mockRepo).take(1).compile.drain.unsafeRunSync()

      // We release the task twice, once on start and once on the run
      verify(mockRepo, times(2)).releaseTask(task.name)
    }

    "fail to start if the task cannot be saved" in {
      val task = new TestTask("test", 5.seconds, 500.millis, 500.millis)
      val spied = spy(task)
      doReturn(IO.raiseError(new RuntimeException("fail"))).when(mockRepo).saveTask(task.name)

      a[RuntimeException] should be thrownBy TaskScheduler
        .schedule(task, mockRepo)
        .take(1)
        .compile
        .drain
        .unsafeRunSync()
      verify(spied, never()).run()
    }
  }
} 
Example 122
Source File: deployment.scala    From aecor   with MIT License 5 votes
package aecor.example.transaction
import java.util.UUID

import aecor.example.common.Timestamp
import aecor.example.transaction.transaction.Transactions
import aecor.runtime.Eventsourced
import aecor.runtime.akkapersistence.AkkaPersistenceRuntime
import aecor.util.Clock
import cats.implicits._
import cats.effect.{ ContextShift, Effect }
import scodec.codecs.implicits._

object deployment {
  def deploy[F[_]: Effect: ContextShift](runtime: AkkaPersistenceRuntime[UUID],
                                         clock: Clock[F]): F[Transactions[F]] =
    runtime
      .deploy(
        "Transaction",
        EventsourcedAlgebra.behavior[F].enrich(clock.instant.map(Timestamp(_))),
        EventsourcedAlgebra.tagging
      )
      .map(Eventsourced.Entities.rejectable(_))
} 
Example 123
Source File: deployment.scala    From aecor   with MIT License 5 votes
package aecor.example.account
import java.util.UUID

import aecor.example.common.Timestamp
import aecor.runtime.Eventsourced
import aecor.runtime.akkapersistence.AkkaPersistenceRuntime
import aecor.util.Clock
import cats.effect.{ ContextShift, Effect }
import cats.implicits._

object deployment {
  def deploy[F[_]: Effect: ContextShift](runtime: AkkaPersistenceRuntime[UUID],
                                         clock: Clock[F]): F[Accounts[F]] =
    runtime
      .deploy(
        "Account",
        EventsourcedAlgebra.behavior[F].enrich(clock.instant.map(Timestamp(_))),
        EventsourcedAlgebra.tagging
      )
      .map(Eventsourced.Entities.rejectable(_))
} 
Example 124
Source File: DistributedProcessing.scala    From aecor   with MIT License 5 votes
package aecor.kafkadistributedprocessing

import java.util.Properties

import aecor.kafkadistributedprocessing.internal.Kafka
import aecor.kafkadistributedprocessing.internal.Kafka._
import cats.effect.{ ConcurrentEffect, ContextShift, Timer }
import cats.implicits._
import cats.effect.implicits._
import fs2.Stream
import org.apache.kafka.clients.consumer.ConsumerConfig

import scala.concurrent.duration._

final class DistributedProcessing(settings: DistributedProcessingSettings) {

  private def assignRange(size: Int, partitionCount: Int, partition: Int): Option[(Int, Int)] = {
    val even = size / partitionCount
    val reminder = size % partitionCount
    if (partition >= partitionCount) {
      none
    } else {
      if (partition < reminder) {
        (partition * (even + 1), even + 1).some
      } else if (even > 0) {
        (reminder + partition * even, even).some
      } else none
    }
  }

  
  // Starts the given processes, distributed over the partitions of the coordination topic.
  def start[F[_]: ConcurrentEffect: Timer: ContextShift](name: String,
                                                         processes: List[F[Unit]]): F[Unit] =
    Kafka
      .assignPartitions(
        settings.asProperties(name),
        settings.topicName,
        settings.pollingInterval,
        settings.pollTimeout
      )
      .parEvalMapUnordered(Int.MaxValue) {
        case AssignedPartition(partition, partitionCount, watchRevocation, release) =>
          assignRange(processes.size, partitionCount, partition).fold(release) {
            case (offset, processCount) =>
              Stream
                .range[F](offset, offset + processCount)
                .parEvalMapUnordered(processCount)(processes) // List#apply serves as Int => F[Unit]
                .compile
                .drain
                .race(watchRevocation)
                .flatMap {
                  case Left(_)         => release
                  case Right(callback) => callback
                }
          }
      }
      .compile
      .drain
}

object DistributedProcessing {
  def apply(settings: DistributedProcessingSettings): DistributedProcessing =
    new DistributedProcessing(settings)
}

final case class DistributedProcessingSettings(brokers: Set[String],
                                               topicName: String,
                                               pollingInterval: FiniteDuration = 500.millis,
                                               pollTimeout: FiniteDuration = 50.millis,
                                               consumerSettings: Map[String, String] = Map.empty) {
  def withClientId(clientId: String): DistributedProcessingSettings =
    withConsumerSetting(ConsumerConfig.CLIENT_ID_CONFIG, clientId)

  def clientId: Option[String] = consumerSettings.get(ConsumerConfig.CLIENT_ID_CONFIG)

  def withConsumerSetting(key: String, value: String): DistributedProcessingSettings =
    copy(consumerSettings = consumerSettings.updated(key, value))

  def asProperties(groupId: String): Properties = {
    val properties = new Properties()
    consumerSettings.foreach {
      case (key, value) => properties.setProperty(key, value)
    }
    properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers.mkString(","))
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    properties
  }

} 
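
The partitioning arithmetic in assignRange can be checked on its own; a standalone sketch of the same computation:

object AssignRangeSketch extends App {
  // `size` processes are spread over `partitionCount` partitions; the first
  // size % partitionCount partitions each receive one extra process.
  def assignRange(size: Int, partitionCount: Int, partition: Int): Option[(Int, Int)] = {
    val even      = size / partitionCount
    val remainder = size % partitionCount
    if (partition >= partitionCount) None
    else if (partition < remainder) Some((partition * (even + 1), even + 1))
    else if (even > 0) Some((remainder + partition * even, even))
    else None
  }

  assert(assignRange(5, 3, 0).contains((0, 2))) // partition 0 runs processes 0-1
  assert(assignRange(5, 3, 1).contains((2, 2))) // partition 1 runs processes 2-3
  assert(assignRange(5, 3, 2).contains((4, 1))) // partition 2 runs process 4
}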
Example 125
Source File: KafkaConsumer.scala    From aecor   with MIT License 5 votes
package aecor.kafkadistributedprocessing.internal

import java.time.Duration
import java.util.Properties
import java.util.concurrent.Executors

import cats.effect.{ Async, ContextShift, Resource }
import cats.~>
import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords }
import org.apache.kafka.common.PartitionInfo
import org.apache.kafka.common.serialization.Deserializer

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V](
  withConsumer: (Consumer[K, V] => *) ~> F
) {

  def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] =
    withConsumer(_.subscribe(topics.asJava, listener))

  def subscribe(topics: Set[String]): F[Unit] =
    withConsumer(_.subscribe(topics.asJava))

  val unsubscribe: F[Unit] =
    withConsumer(_.unsubscribe())

  def partitionsFor(topic: String): F[Set[PartitionInfo]] =
    withConsumer(_.partitionsFor(topic).asScala.toSet)

  def close: F[Unit] =
    withConsumer(_.close())

  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    withConsumer(_.poll(Duration.ofNanos(timeout.toNanos)))
}

private[kafkadistributedprocessing] object KafkaConsumer {
  final class Create[F[_]] {
    def apply[K, V](
      config: Properties,
      keyDeserializer: Deserializer[K],
      valueDeserializer: Deserializer[V]
    )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = {
      val create = F.suspend {

        val executor = Executors.newSingleThreadExecutor()

        def eval[A](a: => A): F[A] =
          contextShift.evalOn(ExecutionContext.fromExecutor(executor)) {
            F.async[A] { cb =>
              executor.execute(new Runnable {
                override def run(): Unit =
                  cb {
                    try Right(a)
                    catch {
                      case e: Throwable => Left(e)
                    }
                  }
              })
            }
          }

        eval {
          val original = Thread.currentThread.getContextClassLoader
          Thread.currentThread.setContextClassLoader(null)
          val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V](
            config,
            keyDeserializer,
            valueDeserializer
          )
          Thread.currentThread.setContextClassLoader(original)
          val withConsumer = new ((Consumer[K, V] => *) ~> F) {
            def apply[A](f: Consumer[K, V] => A): F[A] =
              eval(f(consumer))
          }
          new KafkaConsumer[F, K, V](withConsumer)
        }
      }
      Resource.make(create)(_.close)
    }
  }
  def create[F[_]]: Create[F] = new Create[F]
} 
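
A usage sketch (hypothetical, and note the class is private to this package, so it only compiles from inside it): every call placed through the wrapper executes on the consumer's single dedicated thread, which is what the Kafka client requires.

import java.util.Properties

import cats.effect.{ContextShift, IO, Resource}
import org.apache.kafka.common.serialization.StringDeserializer

object ConsumerSketch {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val props = new Properties()
  props.setProperty("bootstrap.servers", "localhost:9092") // assumed broker address
  props.setProperty("group.id", "sketch-group")

  val consumer: Resource[IO, KafkaConsumer[IO, String, String]] =
    KafkaConsumer.create[IO](props, new StringDeserializer, new StringDeserializer)
}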
Example 126
Source File: PeriodicProcessRuntime.scala    From aecor   with MIT License 5 votes
package aecor.schedule.process

import aecor.distributedprocessing.{ AkkaStreamProcess, DistributedProcessing }
import aecor.util.effect._
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import cats.effect.{ ContextShift, Effect }

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }

object PeriodicProcessRuntime {
  def apply[F[_]: Effect: ContextShift](
    name: String,
    tickInterval: FiniteDuration,
    processCycle: F[Unit]
  )(implicit materializer: Materializer): PeriodicProcessRuntime[F] =
    new PeriodicProcessRuntime[F](name, tickInterval, processCycle)
}

class PeriodicProcessRuntime[F[_]: Effect: ContextShift](
  name: String,
  tickInterval: FiniteDuration,
  processCycle: F[Unit]
)(implicit materializer: Materializer) {

  private def source =
    Source
      .tick(0.seconds, tickInterval, processCycle)
      .mapAsync(1)(_.unsafeToFuture())
      .mapMaterializedValue(_ => NotUsed)

  def run(system: ActorSystem): F[DistributedProcessing.KillSwitch[F]] =
    DistributedProcessing(system)
      .start[F](s"$name-Process", List(AkkaStreamProcess[F](source)))

} 
Example 127
Source File: Session.scala    From aecor   with MIT License 5 votes
package akka.persistence.cassandra
import java.util.concurrent.Executor

import cats.data.Kleisli
import cats.effect.{ Async, ContextShift }
import com.datastax.driver.core.{ ResultSet, TypeCodec, Session => DatastaxSession }

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

trait Session[F[_]] {
  def execute(query: String): F[ResultSet]
  def registerCodec[A](codec: TypeCodec[A]): F[Unit]
}

object Session {
  type Init[F[_]] = Kleisli[F, Session[F], Unit]
  def Init[F[_]](f: Session[F] => F[Unit]): Init[F] = Kleisli(f)
  private val immediateExecutor = new Executor {
    override def execute(command: Runnable): Unit =
      command.run()
  }

  private val immediateExecutionContext = ExecutionContext.fromExecutor(immediateExecutor)

  def apply[F[_]](datastaxSession: DatastaxSession)(implicit F: Async[F],
                                                    contextShift: ContextShift[F]): Session[F] =
    new Session[F] {
      final override def execute(query: String): F[ResultSet] =
        contextShift.evalOn(immediateExecutionContext) {
          F.async { cb =>
            val future = datastaxSession.executeAsync(query)
            val runnable = new Runnable {
              override def run(): Unit =
                try {
                  cb(Right(future.get()))
                } catch {
                  case NonFatal(e) =>
                    cb(Left(e))
                }
            }
            future.addListener(runnable, immediateExecutor)
          }
        }
      override def registerCodec[A](codec: TypeCodec[A]): F[Unit] =
        F.delay {
          datastaxSession.getCluster.getConfiguration.getCodecRegistry.register(codec)
          ()
        }
    }
} 
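
A hypothetical use of the wrapper above: queries run as F values, and the driver's listener completes the callback on the immediate executor, avoiding an extra thread hop. The contact point is an assumption.

import akka.persistence.cassandra.Session
import cats.effect.{ContextShift, IO}
import com.datastax.driver.core.Cluster

object SessionSketch {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val program: IO[Unit] = for {
    cluster <- IO(Cluster.builder().addContactPoint("127.0.0.1").build()) // assumed contact point
    session <- IO(cluster.connect())
    _       <- Session[IO](session).execute("SELECT release_version FROM system.local")
    _       <- IO(cluster.close())
  } yield ()
}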
Example 128
Source File: DefaultJournalCassandraSession.scala    From aecor   with MIT License 5 votes
package akka.persistence.cassandra

import akka.Done
import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.event.Logging
import akka.persistence.cassandra.Session.Init
import akka.persistence.cassandra.session.CassandraSessionSettings
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import cats.effect.{ ContextShift, Effect }
import cats.implicits._

object DefaultJournalCassandraSession {

  
  // Creates the journal CassandraSession, running `init` once the session is established.
  def apply[F[_]: ContextShift](
    system: ActorSystem,
    metricsCategory: String,
    init: Init[F],
    sessionProvider: Option[SessionProvider] = None
  )(implicit F: Effect[F]): F[CassandraSession] = F.delay {
    val log = Logging(system, classOf[CassandraSession])
    val provider = sessionProvider.getOrElse(
      SessionProvider(
        system.asInstanceOf[ExtendedActorSystem],
        system.settings.config.getConfig("cassandra-journal")
      )
    )
    val settings = CassandraSessionSettings(system.settings.config.getConfig("cassandra-journal"))
    new CassandraSession(system, provider, settings, system.dispatcher, log, metricsCategory, { x =>
      F.toIO(init(Session[F](x)).as(Done)).unsafeToFuture()
    })
  }
} 
Example 129
Source File: Timeout.scala    From tofu   with Apache License 2.0 5 votes
package tofu

import cats.effect.{Concurrent, ContextShift, IO, Timer}
import simulacrum.typeclass
import tofu.syntax.feither._
import tofu.internal.NonTofu

import scala.concurrent.duration.FiniteDuration

@typeclass
trait Timeout[F[_]] {
  def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A]
}

object Timeout extends LowPriorTimeoutImplicits {
  implicit def io(implicit timer: Timer[IO], cs: ContextShift[IO]): Timeout[IO] = new Timeout[IO] {
    override def timeoutTo[A](fa: IO[A], after: FiniteDuration, fallback: IO[A]): IO[A] = fa.timeoutTo(after, fallback)
  }
}

trait LowPriorTimeoutImplicits { self: Timeout.type =>
  implicit def concurrent[F[_]: NonTofu](implicit F: Concurrent[F], timer: Timer[F]): Timeout[F] =
    new Timeout[F] {
      override def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A] =
        F.race(timer.sleep(after), fa).getOrElseF(fallback)
    }
} 
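
A usage sketch for the IO instance above: fall back to a cached answer when the primary lookup exceeds its deadline. The names here are illustrative.

import cats.effect.{ContextShift, IO, Timer}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import tofu.Timeout

object TimeoutSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  val slowLookup: IO[String] = IO.sleep(1.second).map(_ => "fresh value")
  val guarded: IO[String]    = Timeout[IO].timeoutTo(slowLookup, 100.millis, IO.pure("cached value"))
}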
Example 130
Source File: Execute.scala    From tofu   with Apache License 2.0 5 votes
package tofu

import cats.effect.{Async, ContextShift}

import tofu.syntax.monadic._
import scala.concurrent.{ExecutionContext, Future}

import simulacrum.typeclass

@typeclass
trait Execute[F[_]] {
  def executionContext: F[ExecutionContext]

  def deferFutureAction[A](f: ExecutionContext => Future[A]): F[A]

  def deferFuture[A](f: => Future[A]): F[A] = deferFutureAction(_ => f)
}

object Execute {
  final implicit def asyncExecute[F[_]](implicit
      ec: ExecutionContext,
      cs: ContextShift[F],
      asyncF: Async[F]
  ): Execute[F] = new Execute[F] {
    def executionContext: F[ExecutionContext]                        = ec.pure[F]
    def deferFutureAction[A](f: ExecutionContext => Future[A]): F[A] =
      Async.fromFuture(asyncF.delay(f(ec)))
  }
} 
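
A usage sketch: suspend a Future-returning API behind IO so the future is only started when the effect runs. legacyCall is a stand-in, not a real API.

import cats.effect.{ContextShift, IO}
import scala.concurrent.{ExecutionContext, Future}
import tofu.Execute

object ExecuteSketch {
  implicit val ec: ExecutionContext = ExecutionContext.global
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)

  def legacyCall(): Future[Int] = Future.successful(42) // stands in for a Future-based client
  val deferred: IO[Int]         = Execute[IO].deferFuture(legacyCall())
}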
Example 131
Source File: ShiftingProducerImpl.scala    From kafka4s   with Apache License 2.0 5 votes
package com.banno.kafka.producer

import cats.effect.{Async, ContextShift}
import java.util.concurrent.{Future => JFuture}

import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

import scala.concurrent.ExecutionContext

case class ShiftingProducerImpl[F[_]: Async, K, V](
    p: ProducerApi[F, K, V],
    blockingContext: ExecutionContext
)(implicit CS: ContextShift[F])
    extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = CS.evalOn(blockingContext)(p.abortTransaction)
  def beginTransaction: F[Unit] = CS.evalOn(blockingContext)(p.beginTransaction)
  def close: F[Unit] = CS.evalOn(blockingContext)(p.close)
  def close(timeout: FiniteDuration): F[Unit] = CS.evalOn(blockingContext)(p.close(timeout))
  def commitTransaction: F[Unit] = CS.evalOn(blockingContext)(p.commitTransaction)
  def flush: F[Unit] = CS.evalOn(blockingContext)(p.flush)
  def initTransactions: F[Unit] = CS.evalOn(blockingContext)(p.initTransactions)
  def metrics: F[Map[MetricName, Metric]] = CS.evalOn(blockingContext)(p.metrics)
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(p.partitionsFor(topic))
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] =
    CS.evalOn(blockingContext)(p.sendOffsetsToTransaction(offsets, consumerGroupId))

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.sendRaw(record)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.sendRaw(record, callback)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Either[Exception, RecordMetadata] => Unit
  ): Unit = p.sendRaw(record, callback)

  def sendAndForget(record: ProducerRecord[K, V]): F[Unit] =
    CS.evalOn(blockingContext)(p.sendAndForget(record))
  def sendSync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendSync(record))
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendAsync(record))
} 
Example 132
Source File: ShiftingConsumerImpl.scala    From kafka4s   with Apache License 2.0 5 votes
package com.banno.kafka.consumer

import cats.effect.{Async, ContextShift}
import java.util.regex.Pattern

import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer._

import scala.concurrent.ExecutionContext

case class ShiftingConsumerImpl[F[_]: Async, K, V](
    c: ConsumerApi[F, K, V],
    blockingContext: ExecutionContext
)(implicit CS: ContextShift[F])
    extends ConsumerApi[F, K, V] {
  def assign(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.assign(partitions))
  def assignment: F[Set[TopicPartition]] = CS.evalOn(blockingContext)(c.assignment)
  def beginningOffsets(partitions: Iterable[TopicPartition]): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.beginningOffsets(partitions))
  def beginningOffsets(
      partitions: Iterable[TopicPartition],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.beginningOffsets(partitions, timeout))
  def close: F[Unit] = CS.evalOn(blockingContext)(c.close)
  def close(timeout: FiniteDuration): F[Unit] = CS.evalOn(blockingContext)(c.close(timeout))
  def commitAsync: F[Unit] = CS.evalOn(blockingContext)(c.commitAsync)
  def commitAsync(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      callback: OffsetCommitCallback
  ): F[Unit] =
    CS.evalOn(blockingContext)(c.commitAsync(offsets, callback))
  def commitAsync(callback: OffsetCommitCallback): F[Unit] =
    CS.evalOn(blockingContext)(c.commitAsync(callback))
  def commitSync: F[Unit] = CS.evalOn(blockingContext)(c.commitSync)
  def commitSync(offsets: Map[TopicPartition, OffsetAndMetadata]): F[Unit] =
    CS.evalOn(blockingContext)(c.commitSync(offsets))
  def committed(partition: Set[TopicPartition]): F[Map[TopicPartition, OffsetAndMetadata]] =
    CS.evalOn(blockingContext)(c.committed(partition))
  def endOffsets(partitions: Iterable[TopicPartition]): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.endOffsets(partitions))
  def endOffsets(
      partitions: Iterable[TopicPartition],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.endOffsets(partitions, timeout))
  def listTopics: F[Map[String, Seq[PartitionInfo]]] = CS.evalOn(blockingContext)(c.listTopics)
  def listTopics(timeout: FiniteDuration): F[Map[String, Seq[PartitionInfo]]] =
    CS.evalOn(blockingContext)(c.listTopics(timeout))
  def metrics: F[Map[MetricName, Metric]] = CS.evalOn(blockingContext)(c.metrics)
  def offsetsForTimes(
      timestampsToSearch: Map[TopicPartition, Long]
  ): F[Map[TopicPartition, OffsetAndTimestamp]] =
    CS.evalOn(blockingContext)(c.offsetsForTimes(timestampsToSearch))
  def offsetsForTimes(
      timestampsToSearch: Map[TopicPartition, Long],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, OffsetAndTimestamp]] =
    CS.evalOn(blockingContext)(c.offsetsForTimes(timestampsToSearch, timeout))
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(c.partitionsFor(topic))
  def partitionsFor(topic: String, timeout: FiniteDuration): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(c.partitionsFor(topic, timeout))
  def pause(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.pause(partitions))
  def paused: F[Set[TopicPartition]] = CS.evalOn(blockingContext)(c.paused)
  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    CS.evalOn(blockingContext)(c.poll(timeout))
  def position(partition: TopicPartition): F[Long] =
    CS.evalOn(blockingContext)(c.position(partition))
  def resume(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.resume(partitions))
  def seek(partition: TopicPartition, offset: Long): F[Unit] =
    CS.evalOn(blockingContext)(c.seek(partition, offset))
  def seekToBeginning(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.seekToBeginning(partitions))
  def seekToEnd(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.seekToEnd(partitions))
  def subscribe(topics: Iterable[String]): F[Unit] = CS.evalOn(blockingContext)(c.subscribe(topics))
  def subscribe(topics: Iterable[String], callback: ConsumerRebalanceListener): F[Unit] =
    CS.evalOn(blockingContext)(c.subscribe(topics, callback))
  def subscribe(pattern: Pattern): F[Unit] = CS.evalOn(blockingContext)(c.subscribe(pattern))
  def subscribe(pattern: Pattern, callback: ConsumerRebalanceListener): F[Unit] =
    CS.evalOn(blockingContext)(c.subscribe(pattern, callback))
  def subscription: F[Set[String]] = CS.evalOn(blockingContext)(c.subscription)
  def unsubscribe: F[Unit] = CS.evalOn(blockingContext)(c.unsubscribe)
  def wakeup: F[Unit] = c.wakeup //TODO wakeup is the one method that is thread-safe, right?
}

object ShiftingConsumerImpl {
  //returns the type expected when creating a Resource
  def create[F[_]: Async: ContextShift, K, V](
      c: ConsumerApi[F, K, V],
      e: ExecutionContext
  ): ConsumerApi[F, K, V] =
    ShiftingConsumerImpl(c, e)
} 
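
The pattern both Shifting* wrappers rely on, shown in isolation: evalOn runs the action on the blocking pool and shifts execution back to the default pool afterwards.

import java.util.concurrent.Executors

import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext

object EvalOnSketch extends App {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val blockingEc = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())
  val name       = cs.evalOn(blockingEc)(IO(Thread.currentThread.getName)).unsafeRunSync()
  println(name) // a thread from the blocking pool, not the global pool
}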
Example 133
Source File: TwitterSrvImpl.scala    From gospeak   with Apache License 2.0 5 votes
package gospeak.infra.services.twitter

import cats.effect.{ContextShift, IO}
import com.danielasfregola.twitter4s.TwitterRestClient
import com.danielasfregola.twitter4s.entities.{AccessToken, ConsumerToken, Tweet}
import gospeak.core.services.twitter.{TwitterConf, TwitterSrv, domain => gs}

import scala.concurrent.ExecutionContext

class TwitterSrvImpl(conf: TwitterConf) extends TwitterSrv {
  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private val consumerToken = ConsumerToken(key = conf.consumerKey, secret = conf.consumerSecret.decode)
  private val accessToken = AccessToken(key = conf.accessKey, secret = conf.accessSecret.decode)
  private val restClient = TwitterRestClient(consumerToken, accessToken)

  def tweet(msg: String): IO[gs.Tweet] =
    IO.fromFuture(IO(restClient.createTweet(trim(msg)))).map(fromLib)

  private def fromLib(t: Tweet): gs.Tweet =
    gs.Tweet(
      id = t.id,
      text = t.text)
} 
Example 134
Source File: InternalSpec.scala    From fs2-aws   with MIT License 5 votes
package fs2.aws.core

import cats.effect.{ ContextShift, IO }
import fs2.Stream
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class InternalSpec extends AnyFlatSpec with Matchers {
  implicit val ec: ExecutionContext             = ExecutionContext.global
  implicit val ioContextShift: ContextShift[IO] = IO.contextShift(ec)

  "groupBy" should "create K substreams based on K selector outputs" in {
    val k = 30
    val streams = Stream
      .emits(1 to 100000)
      .through(groupBy(i => IO(i % k)))
      .compile
      .toVector
      .unsafeRunSync

    streams.size shouldBe k
  }

  it should "split stream elements into respective substreams" in {
    val streams = Stream
      .emits(1 to 10)
      .through(groupBy(i => IO(i % 2)))
      .compile
      .toVector
      .unsafeRunSync

    streams.filter(_._1 == 0).head._2.compile.toVector.unsafeRunSync shouldBe List(2, 4, 6, 8, 10)
    streams.filter(_._1 == 1).head._2.compile.toVector.unsafeRunSync shouldBe List(1, 3, 5, 7, 9)
  }

  it should "fail on exception" in {
    val streams = Stream
      .emits(1 to 10)
      .through(groupBy(i => IO(throw new Exception())))
      .attempt
      .compile
      .toVector
      .unsafeRunSync

    streams.size        shouldBe 1
    streams.head.isLeft shouldBe true
  }
} 
Example 135
Source File: CirisDecoderSpec.scala    From fs2-aws   with MIT License 5 votes
package fs2.aws.ciris;

import java.util.Date

import cats.effect.{ ContextShift, IO }
import ciris.{ ConfigException, ConfigValue }
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import software.amazon.kinesis.common.InitialPositionInStream

import scala.concurrent.ExecutionContext.Implicits.global;

class CirisDecoderSpec extends AnyWordSpec with Matchers {
  implicit val cs: ContextShift[IO] = IO.contextShift(global)

  "InitialPositionDecoderSpec" should {

    "when decoding Either[InitialPositionInStream, Date]" can {

      // same package, so `import fs2.aws.ciris._` not necessary here
      def decode(testStr: String): Either[InitialPositionInStream, Date] =
        ConfigValue
          .default(testStr)
          .as[Either[InitialPositionInStream, Date]]
          .load[IO]
          .unsafeRunSync()

      def expectDecodeFailure(testString: String): Assertion =
        intercept[ConfigException] {
          decode(testString)
        }.getMessage should include(
          s"Unable to convert value $testString to InitialPositionInStream"
        )

      "decode supported strings as initial offsets" in {

        decode("LATEST")           should equal(Left(InitialPositionInStream.LATEST))
        decode("TRIM_HORIZON")     should equal(Left(InitialPositionInStream.TRIM_HORIZON))
        decode("TS_1592404273000") should equal(Right(new Date(1592404273000L)))

      }

      "fail to decode valid strings" in {

        expectDecodeFailure("FOOBAR")
        expectDecodeFailure("TS_FOO")
        expectDecodeFailure("TS_")
        expectDecodeFailure("_1592404273000")

      }
    }

  }

} 
Example 136
Source File: SqsSpec.scala    From fs2-aws   with MIT License 5 votes
package fs2.aws

import cats.effect.concurrent.Deferred
import cats.effect.{ ContextShift, IO }
import com.amazon.sqs.javamessaging.SQSConnection
import com.amazon.sqs.javamessaging.message.SQSTextMessage
import eu.timepit.refined.auto._
import fs2.aws
import fs2.aws.sqs.{ ConsumerBuilder, SQSConsumer, SqsConfig }
import javax.jms.{ Message, MessageListener, TextMessage }
import org.mockito.scalatest.MockitoSugar
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class SqsSpec extends AsyncFlatSpec with Matchers with MockitoSugar {
  implicit val ec: ExecutionContext             = ExecutionContext.global
  implicit val ioContextShift: ContextShift[IO] = IO.contextShift(ec)
  implicit val messageDecoder: Message => Either[Throwable, Int] = { sqs_msg =>
    val text = sqs_msg.asInstanceOf[TextMessage].getText
    if ("fail" == text) Left(intercept[Exception](()))
    else Right(text.toInt)
  }
  "SQS endpoint" should "stream messages" in {

    def stream(d: Deferred[IO, MessageListener]) =
      aws
        .sqsStream[IO, Int](
          SqsConfig("dummy"),
          (_, listener) =>
            new ConsumerBuilder[IO] {
              override def start: IO[SQSConsumer] =
                IO.delay(new SQSConsumer {
                  override def callback: MessageListener = listener

                  override def startConsumer(): Unit = ()

                  override def shutdown(): Unit = ()

                  override def connection: SQSConnection = mock[SQSConnection]
                })
            },
          Some(d)
        )
        .take(4)
        .compile
        .toList

    val r = for {
      d <- Deferred[IO, MessageListener]
      res <- IO.racePair(stream(d), d.get).flatMap {
              case Right((streamFiber, listener)) =>
                listener.onMessage(new SQSTextMessage("1"))
                listener.onMessage(new SQSTextMessage("2"))
                listener.onMessage(new SQSTextMessage("fail"))
                listener.onMessage(new SQSTextMessage("4"))
                listener.onMessage(new SQSTextMessage("5"))
                streamFiber.join
              case _ => IO(Nil)
            }
    } yield res

    val future = r.unsafeToFuture()

    future.map(_ should be(List(1, 2, 4, 5)))
  }
} 
Example 137
Source File: S3Spec.scala    From fs2-aws   with MIT License 5 votes
package fs2
package aws

import java.util.concurrent.Executors

import cats.effect.{ ContextShift, IO }
import com.amazonaws.services.s3.AmazonS3
import fs2.aws.internal.S3Client
import fs2.aws.s3._
import org.mockito.MockitoSugar._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class S3Spec extends AnyFlatSpec with Matchers {

  private val blockingEC                        = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(6))
  implicit val ec: ExecutionContext             = ExecutionContext.global
  implicit val ioContextShift: ContextShift[IO] = IO.contextShift(ec)

  implicit val s3Client: S3Client[IO] = fs2.aws.utils.s3TestClient
  val mockS3                          = mock[AmazonS3]

  ignore should "stdout the jsonfile" in {
    readS3FileMultipart[IO]("resources", "jsontest.json", 25, mockS3).compile.toVector.unsafeRunSync should be(
      Vector()
    )
  }

  "Downloading the JSON test file by chunks" should "return the same content" in {
    readS3FileMultipart[IO]("resources", "jsontest.json", 25, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"test": 1}{"test": 2}{"test": 3}{"test": 4}{"test": 5}{"test": 6}{"test": 7}{"test": 8}"""
    )
  }

  "Downloading the JSON test file" should "return the same content" in {
    readS3File[IO]("resources", "jsontest.json", blockingEC, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"test": 1}{"test": 2}{"test": 3}{"test": 4}{"test": 5}{"test": 6}{"test": 7}{"test": 8}"""
    )
  }

  "Downloading the versioned JSON test file" should "return the same content" in {
    readS3VersionedFile[IO]("resources", "jsontest.json", version = "ABC", blockingEC, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"this": 1}{"is": 2}{"versioned": 3}{"content": 4}"""
    )
  }

  "big chunk size but small entire text" should "be trimmed to content" in {
    readS3FileMultipart[IO]("resources", "jsontest1.json", 25, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be("""{"test": 1}""")
  }
} 
Example 138
Source File: DatabaseConfig.scala    From scala-pet-store   with Apache License 2.0 5 votes
package io.github.pauljamescleary.petstore.config

import cats.syntax.functor._
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import doobie.hikari.HikariTransactor
import org.flywaydb.core.Flyway

import scala.concurrent.ExecutionContext

case class DatabaseConnectionsConfig(poolSize: Int)
case class DatabaseConfig(
    url: String,
    driver: String,
    user: String,
    password: String,
    connections: DatabaseConnectionsConfig,
)

object DatabaseConfig {
  def dbTransactor[F[_]: Async: ContextShift](
      dbc: DatabaseConfig,
      connEc: ExecutionContext,
      blocker: Blocker,
  ): Resource[F, HikariTransactor[F]] =
    HikariTransactor
      .newHikariTransactor[F](dbc.driver, dbc.url, dbc.user, dbc.password, connEc, blocker)

  
  // Runs Flyway migrations against the configured database.
  def initializeDb[F[_]](cfg: DatabaseConfig)(implicit S: Sync[F]): F[Unit] =
    S.delay {
        val fw: Flyway = {
          Flyway
            .configure()
            .dataSource(cfg.url, cfg.user, cfg.password)
            .load()
        }
        fw.migrate()
      }
      .as(())
} 
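
A hypothetical wiring of the two pieces above: run the Flyway migration first, then open the Hikari pool as a Resource so it is closed when the program finishes.

import cats.effect.{Blocker, ContextShift, IO, Resource}
import cats.implicits._
import doobie.hikari.HikariTransactor
import io.github.pauljamescleary.petstore.config.DatabaseConfig
import scala.concurrent.ExecutionContext

object TransactorSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  def transactor(cfg: DatabaseConfig, connEc: ExecutionContext): Resource[IO, HikariTransactor[IO]] =
    Resource.liftF(DatabaseConfig.initializeDb[IO](cfg)) *>
      Blocker[IO].flatMap(blocker => DatabaseConfig.dbTransactor[IO](cfg, connEc, blocker))
}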
Example 139
Source File: package.scala    From scala-pet-store   with Apache License 2.0 5 votes
package io.github.pauljamescleary.petstore
package infrastructure.repository

import cats.implicits._
import cats.effect.{Async, ContextShift, Effect, IO}
import config._
import _root_.doobie.Transactor
import io.circe.config.parser

import scala.concurrent.ExecutionContext

package object doobie {
  def getTransactor[F[_]: Async: ContextShift](cfg: DatabaseConfig): Transactor[F] =
    Transactor.fromDriverManager[F](
      cfg.driver, // driver classname
      cfg.url, // connect URL (driver-specific)
      cfg.user, // user
      cfg.password, // password
    )

  
  // Decodes the petstore config, runs migrations, then builds the Transactor.
  def initializedTransactor[F[_]: Effect: Async: ContextShift]: F[Transactor[F]] =
    for {
      petConfig <- parser.decodePathF[F, PetStoreConfig]("petstore")
      _ <- DatabaseConfig.initializeDb(petConfig.db)
    } yield getTransactor(petConfig.db)

  lazy val testEc = ExecutionContext.Implicits.global

  implicit lazy val testCs: ContextShift[IO] = IO.contextShift(testEc)

  lazy val testTransactor = initializedTransactor[IO].unsafeRunSync()
} 
Example 140
Source File: StreamFs2.scala    From sttp   with Apache License 2.0 5 votes
package sttp.client.examples

object StreamFs2 extends App {
  import sttp.client._
  import sttp.client.asynchttpclient.fs2.AsyncHttpClientFs2Backend

  import cats.effect.{ContextShift, IO}
  import cats.instances.string._
  import fs2.{Stream, text}

  implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global)

  def streamRequestBody(implicit backend: SttpBackend[IO, Stream[IO, Byte], NothingT]): IO[Unit] = {
    val stream: Stream[IO, Byte] = Stream.emits("Hello, world".getBytes)

    basicRequest
      .streamBody(stream)
      .post(uri"https://httpbin.org/post")
      .send()
      .map { response => println(s"RECEIVED:\n${response.body}") }
  }

  def streamResponseBody(implicit backend: SttpBackend[IO, Stream[IO, Byte], NothingT]): IO[Unit] = {
    basicRequest
      .body("I want a stream!")
      .post(uri"https://httpbin.org/post")
      .response(asStreamAlways[Stream[IO, Byte]])
      .send()
      .flatMap { response =>
        response.body.chunks
          .through(text.utf8DecodeC)
          .compile
          .foldMonoid
      }
      .map { body => println(s"RECEIVED:\n$body") }
  }

  val effect = AsyncHttpClientFs2Backend[IO]().flatMap { implicit backend =>
    streamRequestBody.flatMap(_ => streamResponseBody).guarantee(backend.close())
  }

  effect.unsafeRunSync()
} 
Example 141
Source File: Stryker4sMain.scala    From stryker4s   with Apache License 2.0 5 votes
package stryker4s.maven

import javax.inject.Inject
import org.apache.maven.plugin.{AbstractMojo, MojoFailureException}
import org.apache.maven.plugins.annotations.{Mojo, Parameter}
import org.apache.maven.project.MavenProject
import stryker4s.run.threshold.ErrorStatus
import scala.concurrent.ExecutionContext.Implicits.global
import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext


@Mojo(name = "run")
class Stryker4sMain @Inject() (@Parameter(defaultValue = "${project}") project: MavenProject) extends AbstractMojo {
  override def execute(): Unit = {
    implicit val cs: ContextShift[IO] = IO.contextShift(implicitly[ExecutionContext])
    new Stryker4sMavenRunner(project).run() match {
      case ErrorStatus => throw new MojoFailureException("Mutation score was below configured threshold")
      case _           =>
    }
  }
} 
Example 142
Source File: Reporter.scala    From stryker4s   with Apache License 2.0 5 votes
package stryker4s.report

import grizzled.slf4j.Logging
import stryker4s.config._
import stryker4s.files.DiskFileIO
import stryker4s.model.{Mutant, MutantRunResult}
import stryker4s.report.dashboard.DashboardConfigProvider
import cats.implicits._
import cats.effect.IO
import sttp.client.asynchttpclient.cats.AsyncHttpClientCatsBackend
import cats.effect.ContextShift

class Reporter(implicit config: Config, cs: ContextShift[IO])
    extends FinishedRunReporter
    with ProgressReporter
    with Logging {

  lazy val reporters: Iterable[MutationRunReporter] = config.reporters map {
    case Console => new ConsoleReporter()
    case Html    => new HtmlReporter(new DiskFileIO())
    case Json    => new JsonReporter(new DiskFileIO())
    case Dashboard =>
      AsyncHttpClientCatsBackend[IO]()
        .map { implicit backend =>
          new DashboardReporter(new DashboardConfigProvider(sys.env))
        }
        // TODO: Figure out some other way to do this?
        .unsafeRunSync()

  }

  private[this] lazy val progressReporters = reporters collect { case r: ProgressReporter => r }
  private[this] lazy val finishedRunReporters = reporters collect { case r: FinishedRunReporter => r }

  override def reportMutationStart(mutant: Mutant): IO[Unit] =
    reportAll[ProgressReporter](
      progressReporters,
      _.reportMutationStart(mutant)
    )

  override def reportMutationComplete(result: MutantRunResult, totalMutants: Int): IO[Unit] =
    reportAll[ProgressReporter](
      progressReporters,
      _.reportMutationComplete(result, totalMutants)
    )

  override def reportRunFinished(runReport: FinishedRunReport): IO[Unit] = {
    reportAll[FinishedRunReporter](
      finishedRunReporters,
      reporter => reporter.reportRunFinished(runReport)
    )
  }

  // Runs `reportF` on every reporter in parallel, capturing each failure with
  // `attempt` so a single failing reporter cannot cancel the others, and logs
  // a warning listing the failures afterwards.
  private def reportAll[T](reporters: Iterable[T], reportF: T => IO[Unit]): IO[Unit] = {
    reporters.toList
      .parTraverse { reporter =>
        reportF(reporter).attempt
      }
      .map { _ collect { case Left(f) => f } }
      .flatMap { failed =>
        if (failed.nonEmpty) IO {
          warn(s"${failed.size} reporter(s) failed to report:")
          failed.foreach(warn(_))
        }
        else IO.unit
      }
  }
} 
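The `reportAll` helper above is an instance of a reusable pattern: run independent effects in parallel, convert each failure into a value with `attempt`, and inspect the failures afterwards. A minimal self-contained sketch of just that pattern (the task list is illustrative):

import cats.effect.{ContextShift, IO}
import cats.implicits._

object ParAttemptDemo extends App {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val tasks: List[IO[Unit]] =
    List(IO.unit, IO.raiseError(new Exception("boom")), IO.unit)

  val run: IO[Unit] =
    tasks
      .parTraverse(_.attempt)               // run in parallel; failures become Left values
      .map(_.collect { case Left(e) => e }) // keep only the failures
      .flatMap { failed =>
        if (failed.nonEmpty) IO(println(s"${failed.size} task(s) failed"))
        else IO.unit
      }

  run.unsafeRunSync() // prints "1 task(s) failed"
}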
Example 143
Source File: fileIO.scala    From stryker4s   with Apache License 2.0 5 votes vote down vote up
package stryker4s.files
import cats.effect.IO
import fs2._
import fs2.io.readInputStream
import fs2.io.file._
import cats.effect.Blocker
import cats.effect.ContextShift
import cats.effect.Sync
import java.nio.file.Path
sealed trait FileIO {
  def createAndWriteFromResource(file: Path, resource: String): IO[Unit]

  def createAndWrite(file: Path, content: String): IO[Unit]
}

class DiskFileIO()(implicit cs: ContextShift[IO], s: Sync[IO]) extends FileIO {
  override def createAndWriteFromResource(file: Path, resourceName: String): IO[Unit] =
    Blocker[IO].use { blocker =>
      val stream = IO { getClass().getResourceAsStream(resourceName) }

      createDirectories(blocker, file.getParent()) *>
        readInputStream(stream, 8192, blocker)
          .through(writeAll(file, blocker))
          .compile
          .drain
    }

  override def createAndWrite(file: Path, content: String): IO[Unit] =
    Blocker[IO].use { blocker =>
      createDirectories(blocker, file.getParent()) *>
        Stream(content)
          .through(text.utf8Encode)
          .through(writeAll(file, blocker))
          .compile
          .drain
    }
} 
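Both methods above use the same idiom: acquire a `Blocker` (a dedicated thread pool for blocking operations), run the blocking work on it, and let the `Resource` release the pool. A stand-alone sketch of the idiom, with an illustrative file path:

import cats.effect.{Blocker, ContextShift, IO}
import java.nio.file.{Files, Paths}

object BlockerDemo extends App {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  // The blocking read runs on the blocker's pool; execution shifts back afterwards.
  val readHosts: IO[String] = Blocker[IO].use { blocker =>
    blocker.delay[IO, String](new String(Files.readAllBytes(Paths.get("/etc/hosts"))))
  }

  println(readHosts.unsafeRunSync().take(80))
}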
Example 144
Source File: Stryker4sRunner.scala    From stryker4s   with Apache License 2.0 5 votes vote down vote up
package stryker4s.run

import stryker4s.Stryker4s
import stryker4s.config.{Config, ConfigReader}
import stryker4s.mutants.Mutator
import stryker4s.mutants.applymutants.ActiveMutationContext.ActiveMutationContext
import stryker4s.mutants.applymutants.{MatchBuilder, StatementTransformer}
import stryker4s.mutants.findmutants.{FileCollector, MutantFinder, MutantMatcher, SourceCollector}
import stryker4s.report.Reporter
import stryker4s.run.process.ProcessRunner
import stryker4s.run.threshold.ScoreStatus
import cats.effect.ContextShift
import cats.effect.IO

trait Stryker4sRunner {
  def run()(implicit cs: ContextShift[IO]): ScoreStatus = {
    implicit val config: Config = ConfigReader.readConfig()

    val collector = new FileCollector(ProcessRunner())
    val stryker4s = new Stryker4s(
      collector,
      new Mutator(new MutantFinder(new MutantMatcher), new StatementTransformer, new MatchBuilder(mutationActivation)),
      resolveRunner(collector, new Reporter())
    )
    stryker4s.run()
  }

  def resolveRunner(collector: SourceCollector, reporter: Reporter)(implicit config: Config): MutantRunner

  def mutationActivation: ActiveMutationContext
} 
Example 145
Source File: FS2CronTest.scala    From fs2-cron   with Apache License 2.0 5 votes vote down vote up
package eu.timepit.fs2cron

import cats.effect.{ContextShift, IO, Timer}
import cron4s.Cron
import cron4s.expr.CronExpr
import scala.concurrent.ExecutionContext
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

class FS2CronTest extends AnyFunSuite with Matchers {
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
  val evenSeconds: CronExpr = Cron.unsafeParse("*/2 * * ? * *")
  def isEven(i: Int): Boolean = i % 2 == 0

  test("awakeEveryCron") {
    val s1 = awakeEveryCron[IO](evenSeconds) >> evalNow[IO]
    val s2 = s1.map(_.getSecond).take(2).forall(isEven)
    s2.compile.last.map(_.getOrElse(false)).unsafeRunSync() shouldBe true
  }

  test("sleepCron") {
    val s1 = sleepCron[IO](evenSeconds) >> evalNow[IO]
    val s2 = s1.map(_.getSecond).forall(isEven)
    s2.compile.last.map(_.getOrElse(false)).unsafeRunSync() shouldBe true
  }

  test("schedule") {
    implicit val ctxShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
    val everySecond: CronExpr = Cron.unsafeParse("* * * ? * *")
    val s1 = schedule(List(everySecond -> evalNow[IO], evenSeconds -> evalNow[IO])).map(_.getSecond)

    val seconds = s1.take(3).compile.toList.unsafeRunSync()
    seconds.count(isEven) shouldBe 2
    seconds.count(!isEven(_)) shouldBe 1
  }
} 
Example 146
Source File: DeepBindBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.{ContextShift, IO}
import org.openjdk.jmh.annotations._
import scala.concurrent.ExecutionContext.Implicits


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class DeepBindBenchmark {
  implicit val cs: ContextShift[IO] = IO.contextShift(Implicits.global)

  @Param(Array("3000"))
  var size: Int = _

  @Benchmark
  def pure(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO.pure(i)
        _ <- if (j > size) IO.pure(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def delay(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO(i)
        _ <- if (j > size) IO(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def async(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO(i)
        _ <- IO.shift
        _ <- if (j > size) IO(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }
} 
Example 147
Source File: ShallowBindBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.{ContextShift, IO}
import org.openjdk.jmh.annotations._
import scala.concurrent.ExecutionContext.Implicits


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class ShallowBindBenchmark {
  implicit val cs: ContextShift[IO] = IO.contextShift(Implicits.global)

  @Param(Array("10000"))
  var size: Int = _

  @Benchmark
  def pure(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO.pure(i + 1).flatMap(loop)
      else IO.pure(i)

    IO.pure(0)
      .flatMap(loop)
      .unsafeRunSync()
  }

  @Benchmark
  def delay(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO(i + 1).flatMap(loop)
      else IO(i)

    IO(0).flatMap(loop).unsafeRunSync()
  }

  @Benchmark
  def async(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO.shift.flatMap(_ => IO.pure(i + 1)).flatMap(loop)
      else IO.shift.flatMap(_ => IO.pure(i))

    IO(0).flatMap(loop).unsafeRunSync()
  }
} 
Example 148
Source File: IOStart.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.internals

import cats.effect.{ContextShift, Fiber, IO}
import cats.implicits._
import scala.concurrent.Promise

private[effect] object IOStart {

  // Implementation of `IO.start`: forks `fa` via the given ContextShift with its
  // own cancelation connection and memoizes the outcome in a Promise that backs
  // the returned Fiber.
  def apply[A](cs: ContextShift[IO], fa: IO[A]): IO[Fiber[IO, A]] = {
    val start: Start[Fiber[IO, A]] = (_, cb) => {
      // Memoization
      val p = Promise[Either[Throwable, A]]()

      // Starting the source `IO`, with a new connection, because its
      // cancellation is now decoupled from our current one
      val conn2 = IOConnection()
      val cb0 = { (ea: Either[Throwable, A]) =>
        p.success(ea)
        ()
      }
      IORunLoop.startCancelable(IOForkedStart(fa, cs), conn2, cb0)

      cb(Right(fiber(p, conn2)))
    }
    IO.Async(start, trampolineAfter = true)
  }

  private[internals] def fiber[A](p: Promise[Either[Throwable, A]], conn: IOConnection): Fiber[IO, A] =
    Fiber(IOFromFuture(p.future).rethrow, conn.cancel)
} 
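`IOStart` is internal machinery; from user code the same behavior is reached through the public `IO#start`, which requires an implicit `ContextShift[IO]`. A minimal sketch:

import cats.effect.{ContextShift, IO}

object StartDemo extends App {
  implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val program: IO[Int] = for {
    fiber <- IO(40 + 2).start // forks onto the ContextShift, returning a Fiber
    value <- fiber.join       // joins on the memoized result
  } yield value

  println(program.unsafeRunSync()) // 42
}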
Example 149
Source File: JvmMonitoringTest.scala    From datadog4s   with MIT License 5 votes vote down vote up
package com.avast.datadog4s.extension.jvm

import java.time.Duration

import cats.effect.{ ContextShift, IO, Timer }
import com.avast.cloud.datadog4s.inmemory.MockMetricsFactory
import com.avast.datadog4s.extension.jvm.JvmMonitoring.Config
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import cats.syntax.flatMap._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class JvmMonitoringTest extends AnyFlatSpec with Matchers {
  private val ec: ExecutionContext            = scala.concurrent.ExecutionContext.Implicits.global
  implicit val contextShift: ContextShift[IO] = cats.effect.IO.contextShift(ec)
  implicit val timer: Timer[IO]               = IO.timer(ec)

  val noopErrHandler: Throwable => IO[Unit] = (_: Throwable) => IO.unit

  "JvmMonitoring" should "create all expected metrics and update them periodically" in {
    val testEffect = MockMetricsFactory.make[IO].flatMap { inmemory =>
      val runTest = JvmMonitoring
        .configured(inmemory, Config().copy(delay = Duration.ofMillis(10)), noopErrHandler)
        .use(_ => IO.never)
        .timeout(100.millis)
        .attempt

      runTest >> inmemory.state.get
    }
    val result     = testEffect.unsafeRunSync()
    result.keySet must equal(expectedAspects)
    result.values.foreach { vector =>
      vector.groupBy(_.tags).foreach {
        case (_, records) =>
          records.size must be > 0
          records.size must be < 15
      }
    }
  }

  val minorGcParams =
    if (System.getProperty("java.version").startsWith("1.8."))
      Set.empty
    else Set("jvm.gc.minor_collection_time", "jvm.gc.minor_collection_count")

  val expectedAspects: Set[String] = Set(
    "jvm.cpu.load",
    "jvm.cpu.time",
    "jvm.filedescriptor.open",
    "jvm.heap_memory",
    "jvm.heap_memory_committed",
    "jvm.heap_memory_init",
    "jvm.heap_memory_max",
    "jvm.heap_memory.eden",
    "jvm.heap_memory.eden_committed",
    "jvm.heap_memory.eden_max",
    "jvm.heap_memory.survivor",
    "jvm.heap_memory.survivor_committed",
    "jvm.heap_memory.survivor_max",
    "jvm.heap_memory.old_gen",
    "jvm.heap_memory.old_gen_committed",
    "jvm.heap_memory.old_gen_max",
    "jvm.non_heap_memory",
    "jvm.non_heap_memory_committed",
    "jvm.non_heap_memory_init",
    "jvm.non_heap_memory_max",
    "jvm.non_heap_memory.code_cache",
    "jvm.non_heap_memory.code_cache_committed",
    "jvm.non_heap_memory.code_cache_max",
    "jvm.non_heap_memory.metaspace",
    "jvm.non_heap_memory.metaspace_committed",
    "jvm.non_heap_memory.metaspace_max",
    "jvm.non_heap_memory.compressed_class_space",
    "jvm.non_heap_memory.compressed_class_space_committed",
    "jvm.non_heap_memory.compressed_class_space_max",
    "jvm.uptime",
    "jvm.thread_count",
    "jvm.thread_daemon",
    "jvm.thread_started",
    "jvm.loaded_classes",
    "jvm.bufferpool.instances",
    "jvm.bufferpool.bytes",
    "jvm.gc.major_collection_time",
    "jvm.gc.major_collection_count"
  ) ++ minorGcParams
} 
Example 150
Source File: RedisChannel.scala    From laserdisc   with MIT License 5 votes vote down vote up
package laserdisc
package fs2

import java.net.InetSocketAddress

import _root_.fs2._
import _root_.fs2.io.tcp.{Socket, SocketGroup}
import cats.MonadError
import cats.effect.{Blocker, Concurrent, ContextShift, Resource}
import cats.syntax.flatMap._
import laserdisc.protocol._
import log.effect.LogWriter
import scodec.Codec
import scodec.stream.{StreamDecoder, StreamEncoder}

import scala.concurrent.duration.FiniteDuration

object RedisChannel {
  private[this] final val streamDecoder = StreamDecoder.many(Codec[RESP])
  private[this] final val streamEncoder = StreamEncoder.many(Codec[RESP])

  private[fs2] final def apply[F[_]: ContextShift: LogWriter: Concurrent](
      address: InetSocketAddress,
      writeTimeout: Option[FiniteDuration],
      readMaxBytes: Int
  )(blocker: Blocker): Pipe[F, RESP, RESP] = {
    def connectedSocket: Resource[F, Socket[F]] =
      SocketGroup(blocker, nonBlockingThreadCount = 4) >>= (_.client(address, noDelay = true))

    stream =>
      Stream.resource(connectedSocket) >>= { socket =>
        val send    = stream.through(impl.send(socket.writes(writeTimeout)))
        val receive = socket.reads(readMaxBytes).through(impl.receiveResp)

        send.drain
          .covaryOutput[RESP]
          .mergeHaltBoth(receive)
          .onFinalizeWeak(socket.endOfOutput)
      }
  }

  private[this] final object impl {
    def send[F[_]: MonadError[*[_], Throwable]](socketChannel: Pipe[F, Byte, Unit])(
        implicit log: LogWriter[F]
    ): Pipe[F, RESP, Unit] =
      _.evalTap(resp => log.trace(s"sending $resp"))
        .through(streamEncoder.encode[F])
        .flatMap(bits => Stream.chunk(Chunk.bytes(bits.toByteArray)))
        .through(socketChannel)

    def receiveResp[F[_]: MonadError[*[_], Throwable]](implicit log: LogWriter[F]): Pipe[F, Byte, RESP] = {
      def framing: Pipe[F, Byte, CompleteFrame] = {
        def loopScan(bytesIn: Stream[F, Byte], previous: RESPFrame): Pull[F, CompleteFrame, Unit] =
          bytesIn.pull.uncons.flatMap {
            case Some((chunk, rest)) =>
              previous.append(chunk.toByteBuffer) match {
                case Left(ex)                    => Pull.raiseError(ex)
                case Right(frame: CompleteFrame) => Pull.output1(frame) >> loopScan(rest, EmptyFrame)
                case Right(frame: MoreThanOneFrame) =>
                  Pull.output(Chunk.vector(frame.complete)) >> {
                    if (frame.remainder.isEmpty) loopScan(rest, EmptyFrame)
                    else loopScan(rest, IncompleteFrame(frame.remainder, 0L))
                  }
                case Right(frame: IncompleteFrame) => loopScan(rest, frame)
              }

            case _ => Pull.done
          }

        bytesIn => loopScan(bytesIn, EmptyFrame).stream
      }

      pipeIn =>
        streamDecoder
          .decode(pipeIn.through(framing) map (_.bits))
          .evalTap(resp => log.trace(s"receiving $resp"))
    }
  }
} 
Example 151
Source File: Algebras.scala    From hydra   with Apache License 2.0 4 votes vote down vote up
package hydra.ingest.modules

import cats.effect.{Async, ConcurrentEffect, ContextShift, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.ingest.app.AppConfig.AppConfig
import hydra.kafka.algebras.{KafkaAdminAlgebra, KafkaClientAlgebra, MetadataAlgebra}
import io.chrisdavenport.log4cats.Logger

final class Algebras[F[_]] private (
    val schemaRegistry: SchemaRegistry[F],
    val kafkaAdmin: KafkaAdminAlgebra[F],
    val kafkaClient: KafkaClientAlgebra[F],
    val metadata: MetadataAlgebra[F]
)

object Algebras {

  def make[F[_]: Async: ConcurrentEffect: ContextShift: Timer: Logger](config: AppConfig): F[Algebras[F]] =
    for {
      schemaRegistry <- SchemaRegistry.live[F](
        config.createTopicConfig.schemaRegistryConfig.fullUrl,
        config.createTopicConfig.schemaRegistryConfig.maxCacheSize
      )
      kafkaAdmin <- KafkaAdminAlgebra.live[F](config.createTopicConfig.bootstrapServers)
      kafkaClient <- KafkaClientAlgebra.live[F](config.createTopicConfig.bootstrapServers, schemaRegistry, config.ingestConfig.recordSizeLimitBytes)
      metadata <- MetadataAlgebra.make[F](config.v2MetadataTopicConfig.topicName.value,
        config.v2MetadataTopicConfig.consumerGroup, kafkaClient, schemaRegistry, config.v2MetadataTopicConfig.createOnStartup)
    } yield new Algebras[F](schemaRegistry, kafkaAdmin, kafkaClient, metadata)
} 
Example 152
Source File: RollingFileLogger.scala    From odin   with Apache License 2.0 4 votes vote down vote up
package io.odin.loggers

import java.nio.file.{Files, Path, Paths}
import java.time.{Instant, LocalDateTime}
import java.time.format.DateTimeFormatter
import java.util.TimeZone
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

import scala.concurrent.duration.{FiniteDuration, _}

object RollingFileLogger {

  def apply[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = {
    new RollingFileLoggerFactory(
      fileNamePattern,
      maxFileSizeInBytes,
      rolloverInterval,
      formatter,
      minLevel,
      FileLogger.apply[F]
    ).mk
  }

  private[odin] class RefLogger[F[_]: Timer: Monad](
      current: Ref[F, Logger[F]],
      override val minLevel: Level
  ) extends DefaultLogger[F](minLevel) {

    def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg))

    override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs))

  }

  private[odin] class RollingFileLoggerFactory[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level,
      underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]],
      fileSizeCheck: Path => Long = Files.size
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) {

    val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

    def mk: Resource[F, Logger[F]] = {
      val logger = for {
        ((logger, watcherFiber), release) <- allocate.allocated
        refLogger <- Ref.of(logger)
        refRelease <- Ref.of(release)
        _ <- F.start(rollingLoop(watcherFiber, refLogger, refRelease))
      } yield {
        (new RefLogger(refLogger, minLevel), refRelease)
      }
      Resource.make(logger)(_._2.get.flatten).map {
        case (logger, _) => logger
      }
    }

    def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS)

    // Waits for the rollover watcher to finish, atomically swaps in a freshly
    // allocated logger (`allocate` is elided in this excerpt), releases the
    // previous one, and loops.
    def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] =
      for {
        _ <- watcher.join
        oldRelease <- release.get
        ((newLogger, newWatcher), newRelease) <- allocate.allocated
        _ <- logger.set(newLogger)
        _ <- release.set(newRelease)
        _ <- oldRelease
        _ <- rollingLoop(newWatcher, logger, release)
      } yield ()

  }

}
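A usage sketch for the factory above, wiring the parameters of the `apply` signature; the file-name pattern, size limit, and interval are illustrative:

import cats.effect.{ContextShift, IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter
import io.odin.loggers.RollingFileLogger
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object RollingLoggerDemo extends App {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  val run: IO[Unit] =
    RollingFileLogger[IO](
      fileNamePattern = dt => s"app-$dt.log",       // new file name per rollover
      maxFileSizeInBytes = Some(10L * 1024 * 1024), // roll over at ~10 MiB...
      rolloverInterval = Some(1.hour),              // ...or after an hour
      formatter = Formatter.default,
      minLevel = Level.Info
    ).use(logger => logger.info("hello, rolling logs"))

  run.unsafeRunSync()
}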