cats.effect.Blocker Scala Examples

The following examples show how to use cats.effect.Blocker, the cats-effect 2.x wrapper around an ExecutionContext that is dedicated to blocking operations. Each example is taken from the open-source project named in its header; follow the project and source-file references above each example for the full context.
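Before the project examples, here is a minimal, self-contained sketch of the API they all rely on (assuming cats-effect 2.x, where IOApp supplies the implicit ContextShift[IO]): Blocker[IO] allocates a dedicated thread pool as a Resource, blocker.delay runs a side effect on that pool, and blocker.blockOn shifts an already-built effect onto it and back. The object name and printed strings below are illustrative only, not taken from any listed project.

import cats.effect.{Blocker, ExitCode, IO, IOApp}

object BlockerSketch extends IOApp {
  // IOApp provides the implicit ContextShift[IO] needed to shift
  // back to the compute pool after each blocking region completes.
  def run(args: List[String]): IO[ExitCode] =
    Blocker[IO].use { blocker => // Blocker[IO] is a Resource[IO, Blocker]
      for {
        // Suspend a blocking call on the blocker's thread pool.
        home <- blocker.delay[IO, String](sys.props("user.home"))
        // Shift an existing effect onto the blocking pool.
        _    <- blocker.blockOn(IO(println(s"user.home: $home")))
      } yield ExitCode.Success
    }
}

Several of the examples below avoid allocating a fresh pool and instead wrap an existing ExecutionContext with Blocker.liftExecutionContext, which performs no pooling of its own and simply marks that context as safe for blocking work.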
Example 1
Source File: ClientApp.scala    From fs2-chat   with MIT License
package fs2chat
package client

import cats.effect.{Blocker, ExitCode, IO, IOApp}
import cats.implicits._
import com.comcast.ip4s._
import com.monovore.decline._
import fs2.io.tcp.SocketGroup

object ClientApp extends IOApp {
  private val argsParser: Command[(Username, SocketAddress[IpAddress])] =
    Command("fs2chat-client", "FS2 Chat Client") {
      (
        Opts
          .option[String]("username", "Desired username", "u")
          .map(Username.apply),
        Opts
          .option[String]("address", "Address of chat server")
          .withDefault("127.0.0.1")
          .mapValidated(p => IpAddress(p).toValidNel("Invalid IP address")),
        Opts
          .option[Int]("port", "Port of chat server")
          .withDefault(5555)
          .mapValidated(p => Port(p).toValidNel("Invalid port number"))
      ).mapN {
        case (desiredUsername, ip, port) =>
          desiredUsername -> SocketAddress(ip, port)
      }
    }

  def run(args: List[String]): IO[ExitCode] =
    argsParser.parse(args) match {
      case Left(help) => IO(System.err.println(help)).as(ExitCode.Error)
      case Right((desiredUsername, address)) =>
        Blocker[IO]
          .use { blocker =>
            Console[IO](blocker).flatMap { console =>
              SocketGroup[IO](blocker).use { socketGroup =>
                Client
                  .start[IO](console, socketGroup, address, desiredUsername)
                  .compile
                  .drain
              }
            }
          }
          .as(ExitCode.Success)
    }
} 
Example 2
Source File: ServerApp.scala    From fs2-chat   with MIT License
package fs2chat
package server

import cats.effect.{Blocker, ExitCode, IO, IOApp}
import cats.implicits._
import com.comcast.ip4s._
import com.monovore.decline._
import fs2.io.tcp.SocketGroup
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger

object ServerApp extends IOApp {
  private val argsParser: Command[Port] =
    Command("fs2chat-server", "FS2 Chat Server") {
      Opts
        .option[Int]("port", "Port to bind for connection requests")
        .withDefault(5555)
        .mapValidated(p => Port(p).toValidNel("Invalid port number"))
    }

  def run(args: List[String]): IO[ExitCode] =
    argsParser.parse(args) match {
      case Left(help) => IO(System.err.println(help)).as(ExitCode.Error)
      case Right(port) =>
        Blocker[IO]
          .use { blocker =>
            SocketGroup[IO](blocker).use { socketGroup =>
              Slf4jLogger.create[IO].flatMap { implicit logger =>
                Server.start[IO](socketGroup, port).compile.drain
              }
            }
          }
          .as(ExitCode.Success)
    }
} 
Example 3
Source File: AbstractInfluxSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.influx

import cats.effect.{Blocker, IO, Resource}
import ch.epfl.bluebrain.nexus.cli.{AbstractCliSpec, Console}
import ch.epfl.bluebrain.nexus.cli.clients.InfluxClient
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import ch.epfl.bluebrain.nexus.cli.influx.InfluxDocker.InfluxHostConfig
import izumi.distage.model.definition.{Module, ModuleDef}
import org.http4s.client.blaze.BlazeClientBuilder

import scala.concurrent.duration._

class AbstractInfluxSpec extends AbstractCliSpec {

  override protected def defaultModules: Module = {
    super.defaultModules ++ new InfluxDocker.Module[IO]
  }

  override def testModule: ModuleDef =
    new ModuleDef {
      make[AppConfig].fromEffect { host: InfluxHostConfig =>
        copyConfigs.flatMap {
          case (envFile, _, influxFile) =>
            AppConfig.load[IO](Some(envFile), influxConfigFile = Some(influxFile)).flatMap {
              case Left(value)  => IO.raiseError(value)
              case Right(value) =>
                val influxOffsetFile = influxFile.getParent.resolve("influx.offset")
                val cfg              = value.copy(influx =
                  value.influx.copy(
                    endpoint = host.endpoint,
                    offsetFile = influxOffsetFile,
                    offsetSaveInterval = 100.milliseconds
                  )
                )
                IO.pure(cfg)
            }
        }
      }
      make[InfluxClient[IO]].fromResource {
        (_: InfluxDocker.Container, cfg: AppConfig, blocker: Blocker, console: Console[IO]) =>
          BlazeClientBuilder[IO](blocker.blockingContext).resource.flatMap { client =>
            val influxClient = InfluxClient(client, cfg, console)
            waitForInfluxReady(influxClient).map(_ => influxClient)
          }
      }
    }

  private def waitForInfluxReady(
      client: InfluxClient[IO],
      maxDelay: FiniteDuration = 90.seconds
  ): Resource[IO, Unit] = {
    import retry.CatsEffect._
    import retry.RetryPolicies._
    import retry._
    val policy   = limitRetriesByCumulativeDelay[IO](maxDelay, constantDelay(5.second))
    val healthIO = retryingOnAllErrors(
      policy = policy,
      onError = (_: Throwable, _) => IO.delay(println("Influx Container not ready, retrying..."))
    ) {
      client.health.map {
        case Left(err) => throw err
        case Right(_)  => ()
      }
    }
    Resource.liftF(healthIO)
  }
} 
Example 4
Source File: PostgresProjectionSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.postgres

import java.time.OffsetDateTime

import cats.effect.{Blocker, IO}
import ch.epfl.bluebrain.nexus.cli.Console
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import ch.epfl.bluebrain.nexus.cli.modules.postgres.PostgresProjection
import ch.epfl.bluebrain.nexus.cli.modules.postgres.PostgresProjection.TimeMeta.javatime._
import ch.epfl.bluebrain.nexus.cli.sse.Offset
import doobie.util.transactor.Transactor
import fs2.io

//noinspection SqlNoDataSourceInspection
class PostgresProjectionSpec extends AbstractPostgresSpec {

  "A PostgresProjection" should {
    "project all schemas" in {
      import doobie.implicits._
      (xa: Transactor[IO], proj: PostgresProjection[IO]) =>
        for {
          _                                <- proj.run
          count                            <- sql"select count(id) from schemas;".query[Int].unique.transact(xa)
          _                                 = count shouldEqual 175
          maxImport                        <- sql"select id, count(import) from schema_imports group by id order by count desc limit 1;"
                                                .query[(String, Int)]
                                                .unique
                                                .transact(xa)
          (maxImportSchema, maxImportCount) = maxImport
          _                                 = maxImportSchema shouldEqual "https://neuroshapes.org/commons/entity"
          _                                 = maxImportCount shouldEqual 7
          lastUpdated                      <- sql"select last_updated from schemas where id = 'https://neuroshapes.org/commons/entity'"
                                                .query[OffsetDateTime]
                                                .unique
                                                .transact(xa)
          _                                 = lastUpdated.toInstant.toEpochMilli shouldEqual 1584615316089L
        } yield ()
    }
    "save offset" in { (cfg: AppConfig, blocker: Blocker, proj: PostgresProjection[IO], console: Console[IO]) =>
      implicit val b: Blocker     = blocker
      implicit val c: Console[IO] = console

      for {
        _      <- proj.run
        exists <- io.file.exists[IO](blocker, cfg.postgres.offsetFile)
        _       = exists shouldEqual true
        offset <- Offset.load(cfg.postgres.offsetFile)
        _       = offset.nonEmpty shouldEqual true
      } yield ()
    }
  }
} 
Example 5
Source File: OffsetSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.sse

import java.nio.file.{Files, Path}
import java.util.UUID

import cats.effect.{Blocker, ContextShift, IO}
import ch.epfl.bluebrain.nexus.cli.Console
import ch.epfl.bluebrain.nexus.cli.dummies.TestConsole
import org.scalatest.OptionValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class OffsetSpec extends AnyWordSpecLike with Matchers with OptionValues {

  implicit protected val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val blocker: Blocker       = Blocker.liftExecutionContext(ExecutionContext.global)
  implicit private val console: Console[IO]   = TestConsole[IO].unsafeRunSync()

  abstract class Ctx {
    protected val uuid: UUID = UUID.randomUUID
    protected val file: Path = Files.createTempFile("offset", ".conf")
  }

  "An offset" should {
    "be loaded from configuration" in new Ctx {
      Files.writeString(file, uuid.toString)
      (for {
        offset <- Offset.load(file)
        _       = offset.value shouldEqual Offset(uuid)
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }

    "be loaded from configuration but failed to convert to UUID" in new Ctx {
      Files.writeString(file, "not-an-uuid")
      (for {
        offset <- Offset.load(file)
        _       = offset shouldEqual None
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }

    "be written to file" in new Ctx {
      val offset = Offset(UUID.randomUUID())
      (for {
        _ <- offset.write(file)
        _  = Files.readString(file) shouldEqual offset.value.toString
      } yield Files.deleteIfExists(file)).unsafeRunSync()
    }
  }
} 
Example 6
Source File: JDBCJournal.scala    From zio-actors   with Apache License 2.0
package zio.actors.persistence.jdbc

import cats.effect.Blocker
import com.zaxxer.hikari.HikariDataSource
import doobie._
import doobie.hikari.HikariTransactor
import doobie.implicits._
import zio.{ IO, Promise, Runtime, Task, UIO, ZIO }
import zio.actors.ActorSystemUtils
import zio.actors.persistence.PersistenceId.PersistenceId
import zio.actors.persistence.journal.Journal
import zio.actors.persistence.jdbc.JDBCConfig.DbConfig
import zio.blocking.Blocking
import zio.interop.catz._

private[actors] final class JDBCJournal[Ev](tnx: Transactor[Task]) extends Journal[Ev] {

  override def persistEvent(persistenceId: PersistenceId, event: Ev): Task[Unit] =
    for {
      bytes <- ActorSystemUtils.objToByteArray(event)
      _     <- SqlEvents.persistEvent(persistenceId, bytes).run.transact(tnx)
    } yield ()

  override def getEvents(persistenceId: PersistenceId): Task[Seq[Ev]] =
    for {
      bytes  <- SqlEvents.getEventsById(persistenceId).to[Seq].transact(tnx)
      events <- IO.collectAll(bytes.map(ActorSystemUtils.objFromByteArray(_).map(_.asInstanceOf[Ev])))
    } yield events

}

private[actors] object JDBCJournal {

  private lazy val runtime           = Runtime.default
  private lazy val transactorPromise = runtime.unsafeRun(Promise.make[Exception, HikariTransactor[Task]])

  def getJournal[Ev](actorSystemName: String, configStr: String): Task[JDBCJournal[Ev]] =
    for {
      dbConfig <- JDBCConfig.getDbConfig(actorSystemName, configStr)
      tnx      <- getTransactor(dbConfig)
    } yield new JDBCJournal[Ev](tnx)

  private def makeTransactor(dbConfig: DbConfig): ZIO[Blocking, Throwable, HikariTransactor[Task]] =
    ZIO.runtime[Blocking].flatMap { implicit rt =>
      for {
        transactEC <- UIO(rt.environment.get.blockingExecutor.asEC)
        connectEC   = rt.platform.executor.asEC
        ds          = new HikariDataSource()
        _           = ds.setJdbcUrl(dbConfig.dbURL.value)
        _           = ds.setUsername(dbConfig.dbUser.value)
        _           = ds.setPassword(dbConfig.dbPass.value)
        transactor <- IO.effect(HikariTransactor.apply[Task](ds, connectEC, Blocker.liftExecutionContext(transactEC)))
      } yield transactor
    }

  private def getTransactor(dbConfig: DbConfig): Task[HikariTransactor[Task]] =
    transactorPromise.poll.flatMap {
      case Some(value) => value
      case None        =>
        for {
          newTnx <- makeTransactor(dbConfig).provideLayer(Blocking.live)
          _      <- transactorPromise.succeed(newTnx)
        } yield newTnx
    }

} 
Example 7
Source File: package.scala    From pureconfig   with Mozilla Public License 2.0
package pureconfig.module.catseffect

import scala.language.higherKinds
import scala.reflect.ClassTag

import cats.effect.{ Blocker, ContextShift, Sync }
import pureconfig.{ ConfigReader, ConfigSource, Derivation }
import pureconfig.module.catseffect

package object syntax {

  implicit class CatsEffectConfigSource(private val cs: ConfigSource) extends AnyVal {

    @inline
    final def loadF[F[_], A](blocker: Blocker)(implicit F: Sync[F], csf: ContextShift[F], reader: Derivation[ConfigReader[A]], ct: ClassTag[A]): F[A] =
      catseffect.loadF(cs, blocker)

    @deprecated("Use `cs.loadF[F, A](blocker)` instead", "0.12.3")
    def loadF[F[_], A](implicit F: Sync[F], reader: Derivation[ConfigReader[A]], ct: ClassTag[A]): F[A] =
      catseffect.loadF(cs)
  }
} 
Example 8
Source File: EthereumPostgresSpout.scala    From Raphtory   with Apache License 2.0
package com.raphtory.examples.blockchain.spouts

import cats.effect.Blocker
import cats.effect.IO
import com.raphtory.core.components.Spout.SpoutTrait
import doobie.implicits._
import doobie.util.ExecutionContexts
import doobie.util.transactor.Transactor

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.duration.MILLISECONDS
import scala.concurrent.duration.SECONDS

class EthereumPostgresSpout extends SpoutTrait {
  var startBlock = System.getenv().getOrDefault("STARTING_BLOCK", "46147").trim.toInt //first block to have a transaction by default
  val batchSize  = System.getenv().getOrDefault("BLOCK_BATCH_SIZE", "100").trim.toInt //number of blocks to pull each query
  val maxblock   = System.getenv().getOrDefault("MAX_BLOCK", "8828337").trim.toInt    //Maximum block in database to stop querying once this is reached

  val dbURL      = System.getenv().getOrDefault("DB_URL", "jdbc:postgresql:ether").trim //db connection string, default is for local with db called ether
  val dbUSER     = System.getenv().getOrDefault("DB_USER", "postgres").trim             //db user defaults to postgres
  val dbPASSWORD = System.getenv().getOrDefault("DB_PASSWORD", "").trim                 //default no password

  // querying done with doobie wrapper for JDBC (https://tpolecat.github.io/doobie/)
  implicit val cs = IO.contextShift(ExecutionContexts.synchronous)
  val dbconnector = Transactor.fromDriverManager[IO](
          "org.postgresql.Driver",
          dbURL,
          dbUSER,
          dbPASSWORD,
          Blocker.liftExecutionContext(ExecutionContexts.synchronous)
  )

  override def ProcessSpoutTask(message: Any): Unit = message match {
    case StartSpout  => AllocateSpoutTask(Duration(1, MILLISECONDS), "nextBatch")
    case "nextBatch" => running()
    case _           => println("message not recognized!")
  }

  protected def running(): Unit = {
    sql"select from_address, to_address, value,block_timestamp from transactions where block_number >= $startBlock AND block_number < ${startBlock + batchSize} "
      .query[
              (String, String, String, String)
      ]                                      //get the to,from,value and time for transactions within the set block batch
      .to[List]                              // ConnectionIO[List[(String, String, String, String)]]
      .transact(dbconnector)                 // IO[List[(String, String, String, String)]]
      .unsafeRunSync                         // List[(String, String, String, String)]
      .foreach(x => sendTuple(x.toString())) //send each transaction to the routers

    startBlock += batchSize                                   //increment batch for the next query
    if (startBlock > maxblock) stop()                         //if we have reached the max block we stop querying the database
    AllocateSpoutTask(Duration(1, MILLISECONDS), "nextBatch") // line up the next batch
  }
} 
Example 9
Source File: Settings.scala    From vinyldns   with Apache License 2.0
package controllers

import java.net.URI

import cats.effect.{Blocker, ContextShift, IO}
import cats.implicits._
import com.typesafe.config.{Config, ConfigFactory}
import play.api.{ConfigLoader, Configuration}
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.repository.DataStoreConfig

import scala.collection.JavaConverters._
import scala.concurrent.duration._

// $COVERAGE-OFF$
class Settings(private val config: Configuration) {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  val ldapUser: String = config.get[String]("LDAP.user")
  val ldapPwd: String = config.get[String]("LDAP.password")
  val ldapDomain: String = config.get[String]("LDAP.domain")

  val ldapSearchBase: List[LdapSearchDomain] = config.get[List[LdapSearchDomain]]("LDAP.searchBase")
  val ldapCtxFactory: String = config.get[String]("LDAP.context.initialContextFactory")
  val ldapSecurityAuthentication: String = config.get[String]("LDAP.context.securityAuthentication")
  val ldapProviderUrl: URI = new URI(config.get[String]("LDAP.context.providerUrl"))
  val ldapUserNameAttribute: String =
    config.getOptional[String]("LDAP.userNameAttribute").getOrElse("sAMAccountName")

  val ldapSyncEnabled: Boolean =
    config.getOptional[Boolean]("LDAP.user-sync.enabled").getOrElse(false)
  val ldapSyncPollingInterval: FiniteDuration = config
    .getOptional[Int]("LDAP.user-sync.hours-polling-interval")
    .getOrElse(24)
    .hours

  val portalTestLogin: Boolean = config.getOptional[Boolean]("portal.test_login").getOrElse(false)

  val dataStoreConfigs: IO[List[DataStoreConfig]] =
    Blocker[IO].use { blocker =>
      ConfigSource
        .fromConfig(config.underlying)
        .at("data-stores")
        .loadF[IO, List[String]](blocker)
        .flatMap { lst =>
          lst
            .map(
              ConfigSource.fromConfig(config.underlying).at(_).loadF[IO, DataStoreConfig](blocker)
            )
            .parSequence
        }
    }

  val cryptoConfig = IO(config.get[Config]("crypto"))

  implicit def ldapSearchDomainLoader: ConfigLoader[List[LdapSearchDomain]] =
    new ConfigLoader[List[LdapSearchDomain]] {
      def load(config: Config, path: String): List[LdapSearchDomain] = {
        val domains = config.getConfigList(path).asScala.map { domainConfig =>
          val org = domainConfig.getString("organization")
          val domain = domainConfig.getString("domainName")
          LdapSearchDomain(org, domain)
        }
        domains.toList
      }
    }
}
// $COVERAGE-ON$
object Settings extends Settings(Configuration(ConfigFactory.load())) 
Example 10
Source File: APIMetrics.scala    From vinyldns   with Apache License 2.0
package vinyldns.api.metrics
import java.util.concurrent.TimeUnit

import cats.effect.{Blocker, ContextShift, IO}
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.{Metric, MetricFilter, ScheduledReporter, Slf4jReporter}
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import vinyldns.core.VinylDNSMetrics

final case class MemoryMetricsSettings(logEnabled: Boolean, logSeconds: Int)
final case class APIMetricsSettings(memory: MemoryMetricsSettings)

object APIMetrics {
  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  // Output all memory metrics to the log, do not start unless configured
  private val logReporter = Slf4jReporter
    .forRegistry(VinylDNSMetrics.metricsRegistry)
    .filter(new MetricFilter {
      def matches(name: String, metric: Metric): Boolean =
        name.startsWith("memory")
    })
    .withLoggingLevel(LoggingLevel.INFO)
    .outputTo(LoggerFactory.getLogger("MemStats"))
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .build()

  def initialize(
      settings: APIMetricsSettings,
      reporter: ScheduledReporter = logReporter
  ): IO[Unit] = IO {
    if (settings.memory.logEnabled) {
      reporter.start(settings.memory.logSeconds, TimeUnit.SECONDS)
    }
  }

  def loadSettings(config: Config): IO[APIMetricsSettings] =
    Blocker[IO].use(
      ConfigSource.fromConfig(config).loadF[IO, APIMetricsSettings](_)
    )
} 
Example 11
Source File: EmailNotifierProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.api.notifier.email

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import javax.mail.Session

class EmailNotifierProvider extends NotifierProvider {
  import EmailNotifierConfig._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      emailConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, EmailNotifierConfig](_)
      )
      session <- createSession(emailConfig)
    } yield new EmailNotifier(emailConfig, session, userRepository)

  def createSession(config: EmailNotifierConfig): IO[Session] = IO {
    Session.getInstance(config.smtp)
  }
} 
Example 12
Source File: SnsNotifierProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.api.notifier.sns

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import com.amazonaws.services.sns.AmazonSNS
import org.slf4j.LoggerFactory
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.auth.AWSStaticCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials

class SnsNotifierProvider extends NotifierProvider {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  private val logger = LoggerFactory.getLogger(classOf[SnsNotifierProvider])

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      snsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SnsNotifierConfig](_)
      )
      client <- createClient(snsConfig)
    } yield new SnsNotifier(snsConfig, client)

  def createClient(config: SnsNotifierConfig): IO[AmazonSNS] = IO {
    logger.error(
      "Setting up sns notifier client with settings: " +
        s"service endpoint: ${config.serviceEndpoint}; " +
        s"signing region: ${config.signingRegion}; " +
        s"topic name: ${config.topicArn}"
    )
    AmazonSNSClientBuilder.standard
      .withEndpointConfiguration(
        new EndpointConfiguration(config.serviceEndpoint, config.signingRegion)
      )
      .withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(config.accessKey, config.secretKey)
        )
      )
      .build()
  }

} 
Example 13
Source File: SqsMessageQueueProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.sqs.queue
import cats.effect.IO
import cats.implicits._
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.model.QueueDoesNotExistException
import com.amazonaws.services.sqs.{AmazonSQSAsync, AmazonSQSAsyncClientBuilder}
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.Blocker
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}

import scala.util.matching.Regex
import cats.effect.ContextShift

class SqsMessageQueueProvider extends MessageQueueProvider {
  import SqsMessageQueueProvider._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      settingsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SqsMessageQueueSettings](_)
      )
      _ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
      client <- setupClient(settingsConfig)
      queueUrl <- setupQueue(client, settingsConfig.queueName)
      _ <- IO(logger.error(s"Queue URL: $queueUrl\n"))
    } yield new SqsMessageQueue(queueUrl, client)

  def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {
    val validQueueNameRegex: Regex = """^([\w\-]{1,80})$""".r

    validQueueNameRegex
      .findFirstIn(queueName)
      .map(Right(_))
      .getOrElse(Left(InvalidQueueName(queueName)))
  }

  def setupClient(sqsMessageQueueSettings: SqsMessageQueueSettings): IO[AmazonSQSAsync] =
    IO {
      logger.error(
        s"Setting up queue client with settings: " +
          s"service endpoint: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"signing region: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"queue name: ${sqsMessageQueueSettings.queueName}"
      )
      AmazonSQSAsyncClientBuilder
        .standard()
        .withEndpointConfiguration(
          new EndpointConfiguration(
            sqsMessageQueueSettings.serviceEndpoint,
            sqsMessageQueueSettings.signingRegion
          )
        )
        .withCredentials(
          new AWSStaticCredentialsProvider(
            new BasicAWSCredentials(
              sqsMessageQueueSettings.accessKey,
              sqsMessageQueueSettings.secretKey
            )
          )
        )
        .build()
    }

  def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
    // Create queue if it doesn't exist
    IO {
      logger.error(s"Setting up queue with name [$queueName]")
      client.getQueueUrl(queueName).getQueueUrl
    }.recoverWith {
      case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)
    }
}

object SqsMessageQueueProvider {
  final case class InvalidQueueName(queueName: String)
      extends Throwable(
        s"Invalid queue name: $queueName. Must be 1-80 alphanumeric, hyphen or underscore characters. FIFO queues " +
          "(queue names ending in \".fifo\") are not supported."
      )

  private val logger = LoggerFactory.getLogger(classOf[SqsMessageQueueProvider])
} 
Example 14
Source File: MySqlMessageQueueProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.mysql.queue

import cats.effect.IO
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import scalikejdbc.{ConnectionPool, DataSourceConnectionPool}
import scalikejdbc.config.DBs
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}
import vinyldns.mysql.{HikariCloser, MySqlConnectionConfig, MySqlDataSourceSettings}
import vinyldns.mysql.MySqlConnector._
import cats.effect.ContextShift
import cats.effect.Blocker

class MySqlMessageQueueProvider extends MessageQueueProvider {

  private val logger = LoggerFactory.getLogger(classOf[MySqlMessageQueueProvider])

  implicit val mySqlPropertiesReader: ConfigReader[Map[String, AnyRef]] =
    MySqlConnectionConfig.mySqlPropertiesReader

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      connectionSettings <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, MySqlConnectionConfig](_)
      )
      _ <- runDBMigrations(connectionSettings)
      _ <- setupQueueConnection(connectionSettings)
    } yield new MySqlMessageQueue(config.maxRetries)

  def setupQueueConnection(config: MySqlConnectionConfig): IO[Unit] = {
    val queueConnectionSettings = MySqlDataSourceSettings(config, "mysqlQueuePool")

    getDataSource(queueConnectionSettings).map { dataSource =>
      logger.error("configuring connection pool for queue")

      // note this is being called 2x in the case you use the mysql datastores and
      // loader. That should be ok
      DBs.loadGlobalSettings()

      // Configure the connection pool
      ConnectionPool.add(
        MySqlMessageQueue.QUEUE_CONNECTION_NAME,
        new DataSourceConnectionPool(dataSource, closer = new HikariCloser(dataSource))
      )

      logger.error("queue connection pool init complete")
    }
  }

} 
Example 15
Source File: InfluxProjectionSpec.scala    From nexus   with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.influx

import java.nio.file.Files

import cats.effect.{Blocker, IO}
import ch.epfl.bluebrain.nexus.cli.Console
import ch.epfl.bluebrain.nexus.cli.clients.InfluxClient
import ch.epfl.bluebrain.nexus.cli.config.AppConfig
import ch.epfl.bluebrain.nexus.cli.modules.influx.InfluxProjection
import ch.epfl.bluebrain.nexus.cli.sse.Offset
import fs2.io

//noinspection SqlNoDataSourceInspection
class InfluxProjectionSpec extends AbstractInfluxSpec {

  "A InfluxProjection" should {
    val brainParcelationExpected = jsonContentOf("/templates/influxdb-results-brain-parcelation.json")
    val cellRecordExpected       = jsonContentOf("/templates/influxdb-results-cell-record.json")
    "project distribution sizes" in { (proj: InfluxProjection[IO], client: InfluxClient[IO]) =>
      for {
        _                <- proj.run
        brainQueryResult <- client.query("""SELECT * FROM "brainParcelation"""")
        cellQueryResult  <- client.query("""SELECT * FROM "cellRecord"""")
        _                 = brainQueryResult shouldEqual Right(brainParcelationExpected)
        _                 = cellQueryResult shouldEqual Right(cellRecordExpected)
      } yield ()
    }
    "save offset" in { (cfg: AppConfig, blocker: Blocker, proj: InfluxProjection[IO], console: Console[IO]) =>
      implicit val b: Blocker     = blocker
      implicit val c: Console[IO] = console
      for {
        _      <- proj.run
        exists <- io.file.exists[IO](blocker, cfg.influx.offsetFile)
        _       = exists shouldEqual true
        _       = println(s"Offset file content '${Files.readString(cfg.influx.offsetFile)}'")
        offset <- Offset.load(cfg.influx.offsetFile)
        _       = offset.nonEmpty shouldEqual true
      } yield ()
    }
  }
} 
Example 16
Source File: DatabaseConfig.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore.config

import cats.syntax.functor._
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import doobie.hikari.HikariTransactor
import org.flywaydb.core.Flyway

import scala.concurrent.ExecutionContext

case class DatabaseConnectionsConfig(poolSize: Int)
case class DatabaseConfig(
    url: String,
    driver: String,
    user: String,
    password: String,
    connections: DatabaseConnectionsConfig,
)

object DatabaseConfig {
  def dbTransactor[F[_]: Async: ContextShift](
      dbc: DatabaseConfig,
      connEc: ExecutionContext,
      blocker: Blocker,
  ): Resource[F, HikariTransactor[F]] =
    HikariTransactor
      .newHikariTransactor[F](dbc.driver, dbc.url, dbc.user, dbc.password, connEc, blocker)

  def initializeDb[F[_]](cfg: DatabaseConfig)(implicit S: Sync[F]): F[Unit] =
    S.delay {
        val fw: Flyway = {
          Flyway
            .configure()
            .dataSource(cfg.url, cfg.user, cfg.password)
            .load()
        }
        fw.migrate()
      }
      .as(())
} 
Example 17
Source File: fileIO.scala    From stryker4s   with Apache License 2.0
package stryker4s.files
import cats.effect.IO
import fs2._
import fs2.io.readInputStream
import fs2.io.file._
import cats.effect.Blocker
import cats.effect.ContextShift
import cats.effect.Sync
import java.nio.file.Path
sealed trait FileIO {
  def createAndWriteFromResource(file: Path, resource: String): IO[Unit]

  def createAndWrite(file: Path, content: String): IO[Unit]
}

class DiskFileIO()(implicit cs: ContextShift[IO], s: Sync[IO]) extends FileIO {
  override def createAndWriteFromResource(file: Path, resourceName: String): IO[Unit] =
    Blocker[IO].use { blocker =>
      val stream = IO { getClass().getResourceAsStream(resourceName) }

      createDirectories(blocker, file.getParent()) *>
        readInputStream(stream, 8192, blocker)
          .through(writeAll(file, blocker))
          .compile
          .drain
    }

  override def createAndWrite(file: Path, content: String): IO[Unit] =
    Blocker[IO].use { blocker =>
      createDirectories(blocker, file.getParent()) *>
        Stream(content)
          .through(text.utf8Encode)
          .through(writeAll(file, blocker))
          .compile
          .drain
    }
} 
Example 18
Source File: Db.scala    From tamer   with MIT License
package tamer
package db

import java.sql.SQLException
import java.time.Instant

import cats.effect.Blocker
import doobie.hikari.HikariTransactor
import doobie.implicits._
import doobie.util.transactor.Transactor
import eu.timepit.refined.auto._
import fs2.{Chunk, Stream}
import log.effect.LogWriter
import log.effect.zio.ZioLogWriter.log4sFromName
import tamer.config.{DbConfig, QueryConfig}
import zio._
import zio.interop.catz._

import scala.concurrent.ExecutionContext

trait Db extends Serializable {
  val db: Db.Service[Any]
}

object Db {
  implicit class InstantOps(ours: Instant) {
    def -(theirs: Instant): Long = ours.toEpochMilli - theirs.toEpochMilli
  }

  case class ChunkWithMetadata[V](chunk: Chunk[V], pulledAt: Instant = Instant.now())
  case class ValueWithMetadata[V](value: V, pulledAt: Instant = Instant.now())

  trait Service[R] {
    def runQuery[K, V, State](
        tnx: Transactor[Task],
        setup: Setup[K, V, State],
        queryConfig: QueryConfig
    )(state: State, q: Queue[(K, V)]): ZIO[R, DbError, State]
  }

  object > extends Service[Db] {
    override final def runQuery[K, V, State](
        tnx: Transactor[Task],
        setup: Setup[K, V, State],
        queryConfig: QueryConfig
    )(state: State, q: Queue[(K, V)]): ZIO[Db, DbError, State] = ZIO.accessM(_.db.runQuery(tnx, setup, queryConfig)(state, q))
  }

  trait Live extends Db {
    override final val db: Service[Any] = new Service[Any] {
      private[this] val logTask: Task[LogWriter[Task]] = log4sFromName.provide("tamer.Db.Live")
      override final def runQuery[K, V, State](
          tnx: Transactor[Task],
          setup: Setup[K, V, State],
          queryConfig: QueryConfig
      )(state: State, q: Queue[(K, V)]): IO[DbError, State] =
        (for {
          log   <- logTask
          query <- UIO(setup.buildQuery(state))
          _     <- log.debug(s"running ${query.sql} with params derived from $state").ignore
          start <- UIO(Instant.now())
          values <-
            query
              .streamWithChunkSize(queryConfig.fetchChunkSize)
              .chunks
              .transact(tnx)
              .map(c => ChunkWithMetadata(c))
              .evalTap(c => q.offerAll(c.chunk.iterator.to(LazyList).map(v => setup.valueToKey(v) -> v)))
              .flatMap(c => Stream.chunk(c.chunk).map(v => ValueWithMetadata(v, c.pulledAt)))
              .compile
              .toList
          newState <- setup.stateFoldM(state)(
            QueryResult(
              ResultMetadata(values.headOption.fold(Instant.now())(_.pulledAt) - start),
              values.map(_.value)
            )
          )
        } yield newState).mapError { case e: Exception => DbError(e.getLocalizedMessage) }
    }
  }

  def mkTransactor(db: DbConfig, connectEC: ExecutionContext, transactEC: ExecutionContext): Managed[DbError, HikariTransactor[Task]] =
    Managed {
      HikariTransactor
        .newHikariTransactor[Task](db.driver, db.uri, db.username, db.password, connectEC, Blocker.liftExecutionContext(transactEC))
        .allocated
        .map {
          case (ht, cleanup) => Reservation(ZIO.succeed(ht), _ => cleanup.orDie)
        }
        .uninterruptible
        .refineToOrDie[SQLException]
        .mapError(sqle => DbError(sqle.getLocalizedMessage()))
    }
} 
Example 19
Source File: ExampleApp.scala    From caliban   with Apache License 2.0
package caliban.http4s

import caliban.ExampleData._
import caliban.ExampleService.ExampleService
import caliban.{ ExampleApi, ExampleService, Http4sAdapter }
import cats.data.Kleisli
import cats.effect.Blocker
import org.http4s.StaticFile
import org.http4s.implicits._
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.server.middleware.CORS
import zio._
import zio.blocking.Blocking
import zio.interop.catz._

import scala.concurrent.ExecutionContext

object ExampleApp extends App {

  type ExampleTask[A] = RIO[ZEnv with ExampleService, A]

  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    ZIO
      .runtime[ZEnv with ExampleService]
      .flatMap(implicit runtime =>
        for {
          blocker     <- ZIO.access[Blocking](_.get.blockingExecutor.asEC).map(Blocker.liftExecutionContext)
          interpreter <- ExampleApi.api.interpreter
          _ <- BlazeServerBuilder[ExampleTask](ExecutionContext.global)
                .bindHttp(8088, "localhost")
                .withHttpApp(
                  Router[ExampleTask](
                    "/api/graphql" -> CORS(Http4sAdapter.makeHttpService(interpreter)),
                    "/ws/graphql"  -> CORS(Http4sAdapter.makeWebSocketService(interpreter)),
                    "/graphiql"    -> Kleisli.liftF(StaticFile.fromResource("/graphiql.html", blocker, None))
                  ).orNotFound
                )
                .resource
                .toManaged
                .useForever
        } yield ()
      )
      .provideCustomLayer(ExampleService.make(sampleCharacters))
      .exitCode
} 
Example 20
Source File: ExampleApp.scala    From caliban   with Apache License 2.0
package caliban.tapir

import caliban.interop.tapir._
import caliban.tapir.Endpoints._
import caliban.{ GraphQL, Http4sAdapter }
import cats.data.Kleisli
import cats.effect.Blocker
import org.http4s.StaticFile
import org.http4s.implicits._
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.server.middleware.CORS
import sttp.tapir.server.ServerEndpoint
import zio._
import zio.blocking.Blocking
import zio.interop.catz._
import zio.interop.catz.implicits._

import scala.concurrent.ExecutionContext

object ExampleApp extends CatsApp {

  // approach 1: using `Endpoint` and providing the logic
  val graphql: GraphQL[Any] =
    addBook.toGraphQL((bookAddLogic _).tupled) |+|
      deleteBook.toGraphQL((bookDeleteLogic _).tupled) |+|
      booksListing.toGraphQL((bookListingLogic _).tupled)

  // approach 2: using the `ServerEndpoint` where logic is already provided
  type MyIO[+A] = IO[String, A]

  val addBookEndpoint: ServerEndpoint[(Book, String), String, Unit, Nothing, MyIO] =
    addBook.serverLogic[MyIO] { case (book, token) => bookAddLogic(book, token).either }
  val deleteBookEndpoint: ServerEndpoint[(String, String), String, Unit, Nothing, MyIO] =
    deleteBook.serverLogic[MyIO] { case (title, token) => bookDeleteLogic(title, token).either }
  val booksListingEndpoint: ServerEndpoint[(Option[Int], Option[Int]), Nothing, List[Book], Nothing, UIO] =
    booksListing.serverLogic[UIO] { case (year, limit) => bookListingLogic(year, limit).map(Right(_)) }

  val graphql2: GraphQL[Any] =
    addBookEndpoint.toGraphQL |+| deleteBookEndpoint.toGraphQL |+| booksListingEndpoint.toGraphQL

  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    (for {
      blocker     <- ZIO.access[Blocking](_.get.blockingExecutor.asEC).map(Blocker.liftExecutionContext)
      interpreter <- graphql.interpreter
      _ <- BlazeServerBuilder[Task](ExecutionContext.global)
            .bindHttp(8088, "localhost")
            .withHttpApp(
              Router[Task](
                "/api/graphql" -> CORS(Http4sAdapter.makeHttpService(interpreter)),
                "/graphiql"    -> Kleisli.liftF(StaticFile.fromResource("/graphiql.html", blocker, None))
              ).orNotFound
            )
            .resource
            .toManaged
            .useForever
    } yield ()).exitCode
} 
Example 21
Source File: FederatedApp.scala    From caliban   with Apache License 2.0
package caliban.federation

import caliban.Http4sAdapter
import caliban.federation.FederationData.characters.sampleCharacters
import caliban.federation.FederationData.episodes.sampleEpisodes
import cats.data.Kleisli
import cats.effect.Blocker
import org.http4s.StaticFile
import org.http4s.implicits._
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.server.middleware.CORS
import zio._
import zio.blocking.Blocking
import zio.interop.catz._

import scala.concurrent.ExecutionContext

object FederatedApp extends CatsApp {
  type ExampleTask[A] = RIO[ZEnv, A]

  val service1 = CharacterService
    .make(sampleCharacters)
    .memoize
    .use(layer =>
      for {
        blocker     <- ZIO.access[Blocking](_.get.blockingExecutor.asEC).map(Blocker.liftExecutionContext)
        interpreter <- FederatedApi.Characters.api.interpreter.map(_.provideCustomLayer(layer))
        _ <- BlazeServerBuilder[ExampleTask](ExecutionContext.global)
              .bindHttp(8089, "localhost")
              .withHttpApp(
                Router[ExampleTask](
                  "/api/graphql" -> CORS(Http4sAdapter.makeHttpService(interpreter)),
                  "/graphiql"    -> Kleisli.liftF(StaticFile.fromResource("/graphiql.html", blocker, None))
                ).orNotFound
              )
              .resource
              .toManaged
              .useForever
      } yield ()
    )

  val service2 = EpisodeService
    .make(sampleEpisodes)
    .memoize
    .use(layer =>
      for {
        blocker     <- ZIO.access[Blocking](_.get.blockingExecutor.asEC).map(Blocker.liftExecutionContext)
        interpreter <- FederatedApi.Episodes.api.interpreter.map(_.provideCustomLayer(layer))
        _ <- BlazeServerBuilder[ExampleTask](ExecutionContext.global)
              .bindHttp(8088, "localhost")
              .withHttpApp(
                Router[ExampleTask](
                  "/api/graphql" -> CORS(Http4sAdapter.makeHttpService(interpreter)),
                  "/graphiql"    -> Kleisli.liftF(StaticFile.fromResource("/graphiql.html", blocker, None))
                ).orNotFound
              )
              .resource
              .toManaged
              .useForever
      } yield ()
    )

  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    (service1 race service2).exitCode
} 
Example 22
Source File: RedisChannel.scala    From laserdisc   with MIT License
package laserdisc
package fs2

import java.net.InetSocketAddress

import _root_.fs2._
import _root_.fs2.io.tcp.{Socket, SocketGroup}
import cats.MonadError
import cats.effect.{Blocker, Concurrent, ContextShift, Resource}
import cats.syntax.flatMap._
import laserdisc.protocol._
import log.effect.LogWriter
import scodec.Codec
import scodec.stream.{StreamDecoder, StreamEncoder}

import scala.concurrent.duration.FiniteDuration

object RedisChannel {
  private[this] final val streamDecoder = StreamDecoder.many(Codec[RESP])
  private[this] final val streamEncoder = StreamEncoder.many(Codec[RESP])

  private[fs2] final def apply[F[_]: ContextShift: LogWriter: Concurrent](
      address: InetSocketAddress,
      writeTimeout: Option[FiniteDuration],
      readMaxBytes: Int
  )(blocker: Blocker): Pipe[F, RESP, RESP] = {
    def connectedSocket: Resource[F, Socket[F]] =
      SocketGroup(blocker, nonBlockingThreadCount = 4) >>= (_.client(address, noDelay = true))

    stream =>
      Stream.resource(connectedSocket) >>= { socket =>
        val send    = stream.through(impl.send(socket.writes(writeTimeout)))
        val receive = socket.reads(readMaxBytes).through(impl.receiveResp)

        send.drain
          .covaryOutput[RESP]
          .mergeHaltBoth(receive)
          .onFinalizeWeak(socket.endOfOutput)
      }
  }

  private[this] final object impl {
    def send[F[_]: MonadError[*[_], Throwable]](socketChannel: Pipe[F, Byte, Unit])(
        implicit log: LogWriter[F]
    ): Pipe[F, RESP, Unit] =
      _.evalTap(resp => log.trace(s"sending $resp"))
        .through(streamEncoder.encode[F])
        .flatMap(bits => Stream.chunk(Chunk.bytes(bits.toByteArray)))
        .through(socketChannel)

    def receiveResp[F[_]: MonadError[*[_], Throwable]](implicit log: LogWriter[F]): Pipe[F, Byte, RESP] = {
      def framing: Pipe[F, Byte, CompleteFrame] = {
        def loopScan(bytesIn: Stream[F, Byte], previous: RESPFrame): Pull[F, CompleteFrame, Unit] =
          bytesIn.pull.uncons.flatMap {
            case Some((chunk, rest)) =>
              previous.append(chunk.toByteBuffer) match {
                case Left(ex)                    => Pull.raiseError(ex)
                case Right(frame: CompleteFrame) => Pull.output1(frame) >> loopScan(rest, EmptyFrame)
                case Right(frame: MoreThanOneFrame) =>
                  Pull.output(Chunk.vector(frame.complete)) >> {
                    if (frame.remainder.isEmpty) loopScan(rest, EmptyFrame)
                    else loopScan(rest, IncompleteFrame(frame.remainder, 0L))
                  }
                case Right(frame: IncompleteFrame) => loopScan(rest, frame)
              }

            case _ => Pull.done
          }

        bytesIn => loopScan(bytesIn, EmptyFrame).stream
      }

      pipeIn =>
        streamDecoder
          .decode(pipeIn.through(framing) map (_.bits))
          .evalTap(resp => log.trace(s"receiving $resp"))
    }
  }
} 
Example 23
Source File: SwaggerHttp4s.scala    From tapir   with Apache License 2.0
package sttp.tapir.swagger.http4s

import java.util.Properties

import cats.effect.{Blocker, ContextShift, Sync}
import org.http4s.{HttpRoutes, StaticFile, Uri}
import org.http4s.dsl.Http4sDsl
import org.http4s.headers.Location

import scala.concurrent.ExecutionContext

class SwaggerHttp4s(
    yaml: String,
    contextPath: String = "docs",
    yamlName: String = "docs.yaml",
    redirectQuery: Map[String, Seq[String]] = Map.empty
) {
  private val swaggerVersion = {
    val p = new Properties()
    val pomProperties = getClass.getResourceAsStream("/META-INF/maven/org.webjars/swagger-ui/pom.properties")
    try p.load(pomProperties)
    finally pomProperties.close()
    p.getProperty("version")
  }

  def routes[F[_]: ContextShift: Sync]: HttpRoutes[F] = {
    val dsl = Http4sDsl[F]
    import dsl._

    HttpRoutes.of[F] {
      case path @ GET -> Root / `contextPath` =>
        val queryParameters = Map("url" -> Seq(s"${path.uri}/$yamlName")) ++ redirectQuery
        Uri
          .fromString(s"${path.uri}/index.html")
          .map(uri => uri.setQueryParams(queryParameters))
          .map(uri => PermanentRedirect(Location(uri)))
          .getOrElse(NotFound())
      case GET -> Root / `contextPath` / `yamlName` =>
        Ok(yaml)
      case GET -> Root / `contextPath` / swaggerResource =>
        StaticFile
          .fromResource(
            s"/META-INF/resources/webjars/swagger-ui/$swaggerVersion/$swaggerResource",
            Blocker.liftExecutionContext(ExecutionContext.global)
          )
          .getOrElseF(NotFound())
    }
  }
} 
Example 24
Source File: Http4sRequestToRawBody.scala    From tapir   with Apache License 2.0
package sttp.tapir.server.http4s

import java.io.ByteArrayInputStream

import cats.effect.{Blocker, ContextShift, Sync}
import cats.implicits._
import fs2.Chunk
import org.http4s.headers.{`Content-Disposition`, `Content-Type`}
import org.http4s.{Charset, EntityDecoder, Request, multipart}
import sttp.model.{Header, Part}
import sttp.tapir.{RawPart, RawBodyType}

class Http4sRequestToRawBody[F[_]: Sync: ContextShift](serverOptions: Http4sServerOptions[F]) {
  def apply[R](body: fs2.Stream[F, Byte], bodyType: RawBodyType[R], charset: Option[Charset], req: Request[F]): F[R] = {
    def asChunk: F[Chunk[Byte]] = body.compile.to(Chunk)
    def asByteArray: F[Array[Byte]] = body.compile.to(Chunk).map(_.toByteBuffer.array())

    bodyType match {
      case RawBodyType.StringBody(defaultCharset) => asByteArray.map(new String(_, charset.map(_.nioCharset).getOrElse(defaultCharset)))
      case RawBodyType.ByteArrayBody              => asByteArray
      case RawBodyType.ByteBufferBody             => asChunk.map(_.toByteBuffer)
      case RawBodyType.InputStreamBody            => asByteArray.map(new ByteArrayInputStream(_))
      case RawBodyType.FileBody =>
        serverOptions.createFile(serverOptions.blockingExecutionContext, req).flatMap { file =>
          val fileSink = fs2.io.file.writeAll(file.toPath, Blocker.liftExecutionContext(serverOptions.blockingExecutionContext))
          body.through(fileSink).compile.drain.map(_ => file)
        }
      case m: RawBodyType.MultipartBody =>
        // TODO: use MultipartDecoder.mixedMultipart once available?
        implicitly[EntityDecoder[F, multipart.Multipart[F]]].decode(req, strict = false).value.flatMap {
          case Left(failure) =>
            throw new IllegalArgumentException("Cannot decode multipart body: " + failure) // TODO
          case Right(mp) =>
            val rawPartsF: Vector[F[RawPart]] = mp.parts
              .flatMap(part => part.name.flatMap(name => m.partType(name)).map((part, _)).toList)
              .map { case (part, codecMeta) => toRawPart(part, codecMeta, req).asInstanceOf[F[RawPart]] }

            val rawParts: F[Vector[RawPart]] = rawPartsF.sequence

            rawParts.asInstanceOf[F[R]] // R is Seq[RawPart]
        }
    }
  }

  private def toRawPart[R](part: multipart.Part[F], partType: RawBodyType[R], req: Request[F]): F[Part[R]] = {
    val dispositionParams = part.headers.get(`Content-Disposition`).map(_.parameters).getOrElse(Map.empty)
    val charset = part.headers.get(`Content-Type`).flatMap(_.charset)
    apply(part.body, partType, charset, req)
      .map(r =>
        Part(
          part.name.getOrElse(""),
          r,
          otherDispositionParams = dispositionParams - Part.NameDispositionParam,
          headers = part.headers.toList.map(h => Header(h.name.value, h.value))
        )
      )
  }
} 
Example 25
Source File: DoobieHikariModule.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()
      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)
      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)

      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)
      c
    }
  }

} 
Example 26
Source File: Fs2KafkaModule.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.fs2kafka

import cats.effect.{Blocker, ConcurrentEffect, ContextShift, Resource, Timer}
import fs2.kafka._

object Fs2KafkaModule {

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K: Deserializer[F, *], V: Deserializer[F, *]](
      config: ConsumerConfig,
      blocker: Option[Blocker] = None,
      createConsumer: Option[Map[String, String] => F[KafkaByteConsumer]] = None
  ): Resource[F, KafkaConsumer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ConsumerSettings[F, K, V], A) => ConsumerSettings[F, K, V]
    )(initial: ConsumerSettings[F, K, V]): ConsumerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ConsumerSettings(implicitly[Deserializer[F, K]], implicitly[Deserializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .withGroupId(config.groupId)
      .pipe(setOpt(config.groupInstanceId)(_.withGroupInstanceId(_)))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .pipe(setOpt(config.clientRack)(_.withClientRack(_)))
      .withAutoOffsetReset(config.autoOffsetReset)
      .withEnableAutoCommit(config.enableAutoCommit)
      .withAutoCommitInterval(config.autoCommitInterval)
      .withAllowAutoCreateTopics(config.allowAutoCreateTopics)
      .withCloseTimeout(config.closeTimeout)
      .withCommitRecovery(config.commitRecovery)
      .withCommitTimeout(config.commitTimeout)
      .withDefaultApiTimeout(config.defaultApiTimeout)
      .withHeartbeatInterval(config.heartbeatInterval)
      .withIsolationLevel(config.isolationLevel)
      .withMaxPrefetchBatches(config.maxPrefetchBatches)
      .withPollInterval(config.pollInterval)
      .withPollTimeout(config.pollTimeout)
      .withMaxPollInterval(config.maxPollInterval)
      .withMaxPollRecords(config.maxPollRecords)
      .withRequestTimeout(config.requestTimeout)
      .withSessionTimeout(config.sessionTimeout)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createConsumer)(_.withCreateConsumer(_)))

    makeConsumer(settings)
  }

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K, V](
      settings: ConsumerSettings[F, K, V]
  ): Resource[F, KafkaConsumer[F, K, V]] = consumerResource[F].using(settings)

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K: Serializer[F, *], V: Serializer[F, *]](
      config: ProducerConfig,
      blocker: Option[Blocker] = None,
      createProducer: Option[Map[String, String] => F[KafkaByteProducer]] = None
  ): Resource[F, KafkaProducer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ProducerSettings[F, K, V], A) => ProducerSettings[F, K, V]
    )(initial: ProducerSettings[F, K, V]): ProducerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ProducerSettings(implicitly[Serializer[F, K]], implicitly[Serializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .withAcks(config.acks)
      .withBatchSize(config.batchSize)
      .withCloseTimeout(config.closeTimeout)
      .withDeliveryTimeout(config.deliveryTimeout)
      .withRequestTimeout(config.requestTimeout)
      .withLinger(config.linger)
      .withEnableIdempotence(config.enableIdempotence)
      .withMaxInFlightRequestsPerConnection(config.maxInFlightRequestsPerConnection)
      .withParallelism(config.parallelism)
      .withRetries(config.retries)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createProducer)(_.withCreateProducer(_)))

    makeProducer(settings)
  }

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K, V](settings: ProducerSettings[F, K, V]): Resource[F, KafkaProducer[F, K, V]] =
    producerResource[F].using(settings)

  
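  // Local stand-in for Scala 2.13's scala.util.ChainingOps#pipe.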
  implicit private final class ChainingOps[A](private val self: A) extends AnyVal {
    def pipe[B](f: A => B): B = f(self)
  }

} 
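A minimal consumption sketch for the module above; the topic name is a placeholder, the ConsumerConfig value is left as ??? since its fields depend on the toolkit version, and the implicit Deserializer[F, String] instances are assumed to come from fs2-kafka:

import cats.effect.{Blocker, ExitCode, IO, IOApp}
import cats.implicits._

object ConsumerExample extends IOApp {
  def run(args: List[String]): IO[ExitCode] = {
    val config: ConsumerConfig = ??? // fill in bootstrap servers, group id, ...
    Blocker[IO]
      .flatMap(blocker => Fs2KafkaModule.makeConsumer[IO, String, String](config, blocker = Some(blocker)))
      .use { consumer =>
        consumer.subscribeTo("example-topic") >>
          consumer.stream
            .evalMap(committable => IO(println(committable.record.value)))
            .compile
            .drain
      }
      .as(ExitCode.Success)
  }
}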
Example 27
Source File: PostgresqlClient.scala    From izanami   with Apache License 2.0 5 votes vote down vote up
package store.postgresql

import akka.actor.ActorSystem
import cats.effect.Blocker
import doobie.util.transactor.Transactor
import doobie.util.transactor.Transactor.Aux
import env.PostgresqlConfig
import javax.sql.DataSource
import libs.logs.{IzanamiLogger, ZLogger}
import play.api.db.{Database, Databases}
import zio.{Task, UIO, ZManaged}
import zio.interop.catz._

import scala.concurrent.ExecutionContext

case class PostgresqlClient(database: Database, transactor: Transactor[Task])

object PostgresqlClient {

  def postgresqlClient(system: ActorSystem,
                       cf: Option[PostgresqlConfig]): ZManaged[ZLogger, Throwable, Option[PostgresqlClient]] =
    cf.map { config =>
        ZManaged
          .make(
            ZLogger.info(s"Creating database instance") *>
            Task {
              Databases(
                config.driver,
                config.url,
                config = Map(
                  "username" -> config.username,
                  "password" -> config.password,
                  "pool"     -> "hikaricp"
                )
              )
            }
          )(database => UIO(database.shutdown()))
          .mapM { database =>
            ZLogger.info(s"Creating transactor instance") *>
            Task {
              val ce: ExecutionContext = system.dispatchers.lookup("izanami.jdbc-connection-dispatcher")
              val te: ExecutionContext = system.dispatchers.lookup("izanami.jdbc-transaction-dispatcher")
              val transact: Aux[Task, DataSource] = Transactor
                .fromDataSource[Task](database.dataSource, ce, Blocker.liftExecutionContext(te))
              Some(PostgresqlClient(database, transact))
            }
          }
      }
      .getOrElse(ZManaged.effectTotal(None))
} 
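Once acquired, the transactor runs doobie programs as usual; a minimal sketch, relying on zio.interop.catz._ for the cats-effect instances doobie needs:

import doobie.implicits._
import zio.Task
import zio.interop.catz._

def ping(client: PostgresqlClient): Task[Int] =
  sql"select 1".query[Int].unique.transact(client.transactor)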
Example 28
Source File: StoreOpsTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore

import java.nio.charset.Charset
import java.nio.file.Files
import java.util.concurrent.Executors

import cats.effect.{Blocker, IO}
import cats.effect.laws.util.TestInstances
import fs2.{Pipe, Stream}
import org.scalatest.Assertion
import org.scalatest.flatspec.AnyFlatSpec
import implicits._
import org.scalatest.matchers.must.Matchers

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext

class StoreOpsTest extends AnyFlatSpec with Matchers with TestInstances {

  implicit val cs = IO.contextShift(ExecutionContext.global)
  val blocker     = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))

  behavior of "PutOps"
  it should "buffer contents and compute size before calling Store.put" in {
    val bytes: Array[Byte] = "AAAAAAAAAA".getBytes(Charset.forName("utf-8"))
    val store              = DummyStore(_.size must be(Some(bytes.length)))

    Stream
      .emits(bytes)
      .covary[IO]
      .through(store.bufferedPut(Path("path/to/file.txt"), blocker))
      .compile
      .drain
      .unsafeRunSync()
    store.buf.toArray must be(bytes)

  }

  it should "upload a file from a nio Path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    Stream
      .bracket(IO(Files.createTempFile("test-file", ".bin"))) { p => IO(p.toFile.delete).void }
      .flatMap { p =>
        Stream.emits(bytes).covary[IO].through(fs2.io.file.writeAll(p, blocker)).drain ++
          Stream.eval(store.put(p, Path("path/to/file.txt"), blocker))
      }
      .compile
      .drain
      .unsafeRunSync()
    store.buf.toArray must be(bytes)
  }

  it should "download a file to a nio path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_ => succeed)
    val path  = Path("path/to/file.txt")
    Stream.emits(bytes).through(store.put(path)).compile.drain.unsafeRunSync()

    Stream
      .bracket(IO(Files.createTempFile("test-file", ".bin")))(p => IO(p.toFile.delete).void)
      .flatMap { nioPath =>
        Stream.eval(store.get(path, nioPath, blocker)) >> Stream.eval {
          IO {
            Files.readAllBytes(nioPath) mustBe bytes
          }
        }
      }
      .compile
      .drain
      .unsafeRunSync()
  }
}

final case class DummyStore(check: Path => Assertion) extends Store[IO] {
  val buf = new ArrayBuffer[Byte]()
  override def put(path: Path, overwrite: Boolean): Pipe[IO, Byte, Unit] = {
    check(path)
    in => {
      buf.appendAll(in.compile.toVector.unsafeRunSync())
      Stream.emit(())
    }
  }
  override def get(path: Path, chunkSize: Int): Stream[IO, Byte]                   = Stream.emits(buf)
  override def list(path: Path, recursive: Boolean = false): Stream[IO, Path]      = ???
  override def move(src: Path, dst: Path): IO[Unit]                                = ???
  override def copy(src: Path, dst: Path): IO[Unit]                                = ???
  override def remove(path: Path): IO[Unit]                                        = ???
  override def putRotate(computePath: IO[Path], limit: Long): Pipe[IO, Byte, Unit] = ???
} 
Example 29
Source File: Metadata.scala    From sonar-scala   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.mwz.sonar.scala

import java.nio.file.Paths
import java.nio.file.StandardOpenOption

import cats.data.NonEmptyChain
import cats.effect.Blocker
import cats.effect.ExitCode
import cats.effect.IO
import cats.effect.IOApp
import cats.instances.string._
import com.mwz.sonar.scala.metadata._
import com.mwz.sonar.scala.metadata.scalastyle.ScalastyleRules
import com.mwz.sonar.scala.metadata.scalastyle.ScalastyleRulesRepository
import com.mwz.sonar.scala.metadata.scapegoat.ScapegoatRules
import com.mwz.sonar.scala.metadata.scapegoat.ScapegoatRulesRepository
import fs2.Stream
import fs2.io.file._
import fs2.text
import io.circe.Printer
import io.circe.generic.JsonCodec
import io.circe.syntax._

@JsonCodec
final case class SonarScalaMetadata(
  rules: Rules,
  repositories: Map[String, RulesRepository]
)

@JsonCodec
final case class Rules(
  scalastyle: NonEmptyChain[Rule],
  scapegoat: NonEmptyChain[Rule]
)

object Metadata extends IOApp {
  private val metadata: SonarScalaMetadata =
    SonarScalaMetadata(
      rules = Rules(sort(ScalastyleRules.rules), sort(ScapegoatRules.rules)),
      repositories = Map(
        ScalastyleRulesRepository.RepositoryKey ->
        ScalastyleRulesRepository.rulesRepository
          .copy(rules = sort(ScalastyleRulesRepository.rulesRepository.rules)),
        ScapegoatRulesRepository.RepositoryKey ->
        ScapegoatRulesRepository.rulesRepository
          .copy(rules = sort(ScapegoatRulesRepository.rulesRepository.rules))
      )
    )
  private val printer: Printer =
    Printer.spaces2SortKeys.copy(
      colonLeft = "",
      lbraceLeft = "",
      rbraceRight = "",
      lbracketLeft = "",
      lrbracketsEmpty = "",
      rbracketRight = "",
      arrayCommaLeft = "",
      objectCommaLeft = ""
    )

  // Chain is missing sortBy, which should be added in 2.2.0.
  private def sort(rules: NonEmptyChain[Rule]): NonEmptyChain[Rule] =
    NonEmptyChain.fromNonEmptyList(rules.toNonEmptyList.sortBy(_.name))

  def run(args: List[String]): IO[ExitCode] = {
    val write: Stream[IO, Unit] = Stream.resource(Blocker[IO]).flatMap { blocker =>
      Stream[IO, String](metadata.asJson.printWith(printer))
        .through(text.utf8Encode)
        .through(
          writeAll(
            Paths.get("sonar-scala-metadata.json"),
            blocker,
            List(StandardOpenOption.TRUNCATE_EXISTING)
          )
        )
    }
    write.compile.drain.as(ExitCode.Success)
  }
} 
Example 30
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.net.{ InetSocketAddress, InetAddress }
import java.nio.channels.AsynchronousChannelGroup
import java.util.concurrent.Executors

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Resource, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200
  final val port = 8080

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(port, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val s = sockets.map { socket =>
          Stream.resource(socket).flatMap { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val tsk: IO[BitVector] = bvs.compile.toVector.map(_.foldLeft(BitVector.empty)(_ ++ _))
            val request: IO[Request] = tsk.flatMap { bv =>
              Codec[Request].decode(bv).fold(
                err => IO.raiseError(new Exception(err.toString)),
                result => IO.pure(result.value)
              )
            }
            val response: IO[Response] = request.flatMap(logic)
            val encoded: Stream[IO, Byte] = Stream.eval(response)
              .map(r => Codec[Response].encode(r).require)
              .flatMap { bv => Stream.chunk(Chunk.bytes(bv.bytes.toArray)) }
            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }
        s.parJoin[IO, Unit](maxClients)
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case RandomNumber(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          Number(v)
        }
      } else if (min === max) {
        IO.pure(Number(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case ReSeed(s) =>
      IO {
        rnd.setSeed(s)
        Ok
      }
  }
} 
Example 31
Source File: ServerSpec.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext

import cats.effect.{ IO, Blocker, ContextShift }

import org.scalatest.{ FlatSpec, Matchers, BeforeAndAfterAll }

import fs2.{ Stream, Chunk }

import scodec.bits._
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

class ServerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val ex = Executors.newCachedThreadPool()
  val ec = ExecutionContext.fromExecutor(ex)
  val bl = Blocker.liftExecutionContext(ec)
  val (sg, closeSg) = fs2.io.tcp.SocketGroup[IO](bl).allocated.unsafeRunSync()

  override def afterAll(): Unit = {
    super.afterAll()
    closeSg.unsafeRunSync()
    ex.shutdown()
  }

  "Server" should "respond to a request" in {
    val responses: Vector[Response] = Stream(
      Server.serve(Server.port, sg).drain,
      client(Server.port)
    ).parJoin(Int.MaxValue).take(1L).compile.toVector.unsafeRunSync()
    responses should === (Vector(Ok))
  }

  def client(port: Int): Stream[IO, Response] = {
    Stream.resource(sg.client[IO](Server.addr(port))).flatMap { socket =>
      val bvs: Stream[IO, BitVector] = Stream(Codec[Request].encode(ReSeed(56)).require)
      val bs: Stream[IO, Byte] = bvs.flatMap { bv =>
        Stream.chunk(Chunk.bytes(bv.bytes.toArray))
      }
      val read = bs.through(socket.writes(Server.timeout)).drain.onFinalize(socket.endOfOutput) ++
        socket.reads(Server.bufferSize, Server.timeout).chunks.map(ch => BitVector.view(ch.toArray))
      read.fold(BitVector.empty)(_ ++ _).map(bv => Codec[Response].decode(bv).require.value)
    }
  }
} 
Example 32
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetSocketAddress, InetAddress }

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Random, Seed, RandInt, Seeded }

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  val reqCodec: StreamDecoder[Request] = streamDecoderFromReified[Request]
  val resCodec: StreamEncoder[Response] = streamEncoderFromReified[Response]

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(1234, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] =
    serveAddr(port, sg).as(())

  def serveAddr(port: Int, sg: tcp.SocketGroup): Stream[IO, InetSocketAddress] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val x = sockets.flatMap { socket =>
          Stream.resource(socket).map { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val requests: Stream[IO, Request] = bvs.through(decPipe[IO, Request])
            val responses: Stream[IO, Response] = requests.flatMap(req => Stream.eval(logic(req)))
            val encoded: Stream[IO, Byte] = resCodec.encode(responses).flatMap { bv =>
              Stream.chunk(Chunk.bytes(bv.bytes.toArray))
            }

            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }

        Stream.emit(localAddr) ++ x.parJoin(maxClients).drain
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case Random(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          RandInt(v)
        }
      } else if (min === max) {
        IO.pure(RandInt(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case Seed(s) =>
      IO {
        rnd.setSeed(s)
        Seeded
      }
  }
} 
Example 33
Source File: Main.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.streaming

import java.io.{ InputStream, OutputStream, FileInputStream, FileOutputStream }

import cats.implicits._
import cats.effect.{ IO, IOApp, Blocker, ExitCode }

import fs2.{ Stream, Chunk, Pure }

import dev.tauri.seals.scodec.StreamCodecs._

object Main extends IOApp {

  sealed trait Color
  final case object Brown extends Color
  final case object Grey extends Color

  sealed trait Animal
  final case class Elephant(name: String, tuskLength: Float) extends Animal
  final case class Quokka(name: String, color: Color = Brown) extends Animal
  final case class Quagga(name: String, speed: Double) extends Animal

  def transform(from: InputStream, to: OutputStream)(f: Animal => Stream[Pure, Animal]): IO[Unit] = {
    Blocker[IO].use { blocker =>
      val input = fs2.io.readInputStream(
        IO.pure(from),
        chunkSize = 1024,
        blocker = blocker
      )
      val sIn: Stream[IO, Animal] = input.through(streamDecoderFromReified[Animal].toPipeByte[IO]).flatMap(f)
      val sOut: Stream[IO, Unit] = streamEncoderFromReified[Animal].encode(sIn).flatMap { bv =>
        Stream.chunk(Chunk.bytes(bv.bytes.toArray))
      }.through(fs2.io.writeOutputStream(
        IO.pure(to),
        blocker = blocker,
        closeAfterUse = true
      ))
      sOut.compile.drain
    }
  }

  val transformer: Animal => Stream[Pure, Animal] = {
    case Elephant(n, tl) => Stream(Elephant(n, tl + 17))
    case Quokka(n, Brown) => Stream(Quokka(n, Grey))
    case q @ Quokka(_, _) => Stream(q)
    case Quagga(_, _) => Stream.empty
  }

  override def run(args: List[String]): IO[ExitCode] = {
    val (from, to) = args match {
      case List(from, to, _*) =>
        (from, to)
      case List(from) =>
        (from, "out.bin")
      case _ =>
        ("in.bin", "out.bin")
    }

    val task = transform(new FileInputStream(from), new FileOutputStream(to))(transformer)
    task.as(ExitCode.Success)
  }
} 
Example 34
Source File: BoxStoreTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore.box


import java.util.concurrent.Executors

import blobstore.Path
import cats.effect.{Blocker, ContextShift, IO}
import com.box.sdk.BoxAPIConnection
import org.scalatest.matchers.must.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import scala.concurrent.ExecutionContext

class BoxStoreTest extends AnyFlatSpec with Matchers {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))
  "splitPath" should "correctly split a long path" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("long/path/to/filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("long" :: "path" :: "to" :: Nil)
    key must be("filename")
  }

  it should "split a single element path into a single element list and empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("filename"::Nil)
    key must be("")
  }

  it should "split an empty path into empty list, empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be(""::Nil)
    key must be("")
  }

} 
Example 35
Source File: SftpStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore
package sftp

import java.util.Date

import com.jcraft.jsch._
import cats.instances.option._

import scala.util.Try
import java.io.OutputStream

import cats.Traverse
import cats.effect.{Blocker, ConcurrentEffect, ContextShift, IO, Resource}
import cats.effect.concurrent.{MVar, Semaphore}
import fs2.concurrent.Queue

final class SftpStore[F[_]](
  absRoot: String,
  session: Session,
  blocker: Blocker,
  mVar: MVar[F, ChannelSftp],
  semaphore: Option[Semaphore[F]],
  connectTimeout: Int
)(implicit F: ConcurrentEffect[F], CS: ContextShift[F]) extends Store[F] {
  import implicits._

  import Path.SEP

  private val openChannel: F[ChannelSftp] = {
    val openF = blocker.delay{
      val ch = session.openChannel("sftp").asInstanceOf[ChannelSftp]
      ch.connect(connectTimeout)
      ch
    }
    semaphore.fold(openF){s =>
      F.ifM(s.tryAcquire)(openF, getChannel)
    }
  }

  private val getChannel = F.flatMap(mVar.tryTake) {
    case Some(channel) => F.pure(channel)
    case None => openChannel
  }

  private def channelResource: Resource[F, ChannelSftp] = Resource.make{
    getChannel
  }{
    case ch if ch.isClosed => F.unit
    case ch => F.ifM(mVar.tryPut(ch))(F.unit, SftpStore.closeChannel(semaphore, blocker)(ch))
  }

  // Remaining Store[F] members (get, put, list, move, copy, remove) are elided in this excerpt.
}

object SftpStore {

  def apply[F[_]](
    absRoot: String,
    fa: F[Session],
    blocker: Blocker,
    maxChannels: Option[Long] = None,
    connectTimeout: Int = 10000
  )(implicit F: ConcurrentEffect[F], CS: ContextShift[F]): fs2.Stream[F, SftpStore[F]] =
    if (maxChannels.exists(_ < 1)) {
      fs2.Stream.raiseError[F](new IllegalArgumentException(s"maxChannels must be >= 1"))
    } else {
      for {
        session <- fs2.Stream.bracket(fa)(session => F.delay(session.disconnect()))
        semaphore <- fs2.Stream.eval(Traverse[Option].sequence(maxChannels.map(Semaphore.apply[F])))
        mVar <- fs2.Stream.bracket(MVar.empty[F, ChannelSftp])(mVar => F.flatMap(mVar.tryTake)(_.fold(F.unit)(closeChannel[F](semaphore, blocker))))
      } yield new SftpStore[F](absRoot, session, blocker, mVar, semaphore, connectTimeout)
    }

  private def closeChannel[F[_]](semaphore: Option[Semaphore[F]], blocker: Blocker)(ch: ChannelSftp)(implicit F: ConcurrentEffect[F], CS: ContextShift[F]): F[Unit] =
    F.productR(semaphore.fold(F.unit)(_.release))(blocker.delay(ch.disconnect()))
} 
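A minimal construction sketch; the host and credentials are placeholders, and the ConcurrentEffect/ContextShift instances are assumed to be in scope (e.g. inside an IOApp):

import cats.effect.{Blocker, ConcurrentEffect, ContextShift, IO}
import com.jcraft.jsch.{JSch, Session}
import blobstore.sftp.SftpStore

def sftp(blocker: Blocker)(implicit F: ConcurrentEffect[IO], cs: ContextShift[IO]): fs2.Stream[IO, SftpStore[IO]] = {
  val session: IO[Session] = IO {
    val s = new JSch().getSession("user", "sftp.example.com", 22)
    s.setPassword("secret")
    // s.setConfig("StrictHostKeyChecking", "no") // often needed outside production
    s.connect()
    s
  }
  SftpStore[IO]("/upload", session, blocker, maxChannels = Some(4L))
}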
Example 36
Source File: GcsStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore.gcs

import java.nio.channels.Channels
import java.time.Instant
import java.util.Date

import blobstore.{Path, Store}
import cats.effect.{Blocker, ContextShift, Sync}
import com.google.api.gax.paging.Page
import com.google.cloud.storage.{Acl, Blob, BlobId, BlobInfo, Storage}
import com.google.cloud.storage.Storage.{BlobListOption, CopyRequest}
import fs2.{Chunk, Pipe, Stream}

import scala.jdk.CollectionConverters._

final class GcsStore[F[_]](storage: Storage, blocker: Blocker, acls: List[Acl] = Nil)(implicit F: Sync[F], CS: ContextShift[F]) extends Store[F] {

  private def _chunk(pg: Page[Blob]): Chunk[Path] = {
    val (dirs, files) = pg.getValues.asScala.toSeq.partition(_.isDirectory)
    val dirPaths = Chunk.seq(dirs.map(b => Path(root = b.getBucket, key = b.getName.stripSuffix("/"), size = None, isDir = true, lastModified = None)))
    val filePaths = Chunk.seq(files.map{b =>
      val size = Option(b.getSize: java.lang.Long).map(_.toLong) // Prevent throwing NPE (see https://github.com/scala/bug/issues/9634)
      val lastModified = Option(b.getUpdateTime: java.lang.Long).map(millis => Date.from(Instant.ofEpochMilli(millis))) // Prevent throwing NPE (see https://github.com/scala/bug/issues/9634)
      Path(b.getBucket, key = b.getName, size = size, isDir = false, lastModified = lastModified)
    })
    Chunk.concat(List(dirPaths, filePaths))
  }

  def list(path: Path): fs2.Stream[F, Path] = {
    Stream.unfoldChunkEval[F, () => Option[Page[Blob]], Path]{
      () => Some(storage.list(path.root, BlobListOption.currentDirectory(), BlobListOption.prefix(path.key)))
    }{getPage =>
      blocker.delay{
        getPage().map{pg =>
          if (pg.hasNextPage){
            (_chunk(pg), () => Some(pg.getNextPage))
          } else {
            (_chunk(pg), () => None)
          }
        }
      }
    }
  }

  def get(path: Path, chunkSize: Int): fs2.Stream[F, Byte] = {
    val is = blocker.delay(Channels.newInputStream(storage.get(path.root, path.key).reader()))
    fs2.io.readInputStream(is, chunkSize, blocker, closeAfterUse = true)
  }

  def put(path: Path): Pipe[F, Byte, Unit] = {
    val fos = Sync[F].delay{
      val builder = {
        val b = BlobInfo.newBuilder(path.root, path.key)
        if (acls.nonEmpty) b.setAcl(acls.asJava) else b
      }
      val blobInfo = builder.build()
      val writer = storage.writer(blobInfo)
      Channels.newOutputStream(writer)
    }
    fs2.io.writeOutputStream(fos, blocker, closeAfterUse = true)
  }

  def move(src: Path, dst: Path): F[Unit] = F.productR(copy(src, dst))(remove(src))

  def copy(src: Path, dst: Path): F[Unit] = {
    val req = CopyRequest.newBuilder().setSource(src.root, src.key).setTarget(BlobId.of(dst.root, dst.key)).build()
    F.void(blocker.delay(storage.copy(req).getResult))
  }

  def remove(path: Path): F[Unit] =
    F.void(blocker.delay(storage.delete(path.root, path.key)))
}


object GcsStore{
  def apply[F[_]](
    storage: Storage,
    blocker: Blocker,
    acls: List[Acl]
  )(implicit F: Sync[F], CS: ContextShift[F]): GcsStore[F] = new GcsStore(storage, blocker, acls)
} 
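A minimal listing sketch; the bucket and prefix are placeholders, and StorageOptions.getDefaultInstance is assumed to pick up application-default credentials:

import cats.effect.{Blocker, ContextShift, IO}
import com.google.cloud.storage.StorageOptions
import blobstore.Path
import blobstore.gcs.GcsStore
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val listAll: IO[Unit] = Blocker[IO].use { blocker =>
  val store = GcsStore[IO](StorageOptions.getDefaultInstance.getService, blocker, acls = Nil)
  store.list(Path("my-bucket/some/prefix/")).evalMap(p => IO(println(p))).compile.drain
}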
Example 37
Source File: FileStore.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore
package fs

import java.nio.file.{Files, Paths, Path => NioPath}
import java.util.Date

import scala.jdk.CollectionConverters._
import cats.implicits._
import cats.effect.{Blocker, ContextShift, Sync}
import fs2.{Pipe, Stream}

final class FileStore[F[_]](fsroot: NioPath, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F]) extends Store[F] {
  val absRoot: String = fsroot.toAbsolutePath.normalize.toString

  override def list(path: Path): fs2.Stream[F, Path] = {
    val isDir = Stream.eval(F.delay(Files.isDirectory(path)))
    val isFile = Stream.eval(F.delay(Files.exists(path)))

    val files = Stream.eval(F.delay(Files.list(path)))
      .flatMap(x => Stream.fromIterator(x.iterator.asScala))
      .evalMap(x => F.delay(
        Path(x.toAbsolutePath.toString.replaceFirst(absRoot, "")).copy(
          size = Option(Files.size(x)),
          isDir = Files.isDirectory(x),
          lastModified = Option(new Date(Files.getLastModifiedTime(x).toMillis))
        )
      ))

    val file = fs2.Stream.eval {
      F.delay {
        path.copy(
          size = Option(Files.size(path)),
          lastModified = Option(new Date(Files.getLastModifiedTime(path).toMillis))
        )
      }
    }

    isDir.ifM(files, isFile.ifM(file, Stream.empty.covaryAll[F, Path]))
  }

  override def get(path: Path, chunkSize: Int): fs2.Stream[F, Byte] = fs2.io.file.readAll[F](path, blocker, chunkSize)

  override def put(path: Path): Pipe[F, Byte, Unit] = { in =>
    val mkdir = Stream.eval(F.delay(Files.createDirectories(_toNioPath(path).getParent)).as(true))
    mkdir.ifM(
      fs2.io.file.writeAll(path, blocker).apply(in),
      Stream.raiseError[F](new Exception(s"failed to create dir: $path"))
    )
  }

  override def move(src: Path, dst: Path): F[Unit] = F.delay {
    Files.createDirectories(_toNioPath(dst).getParent)
    Files.move(src, dst)
  }.void

  override def copy(src: Path, dst: Path): F[Unit] = {
    F.delay {
      Files.createDirectories(_toNioPath(dst).getParent)
      Files.copy(src, dst)
    }.void
  }

  override def remove(path: Path): F[Unit] = F.delay({
    Files.deleteIfExists(path)
    ()
  })

  implicit private def _toNioPath(path: Path): NioPath =
    Paths.get(absRoot, path.root, path.key)

}

object FileStore{
  def apply[F[_]](fsroot: NioPath, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F]): FileStore[F] = new FileStore(fsroot, blocker)
} 
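Round-tripping a small payload through the store, as a sketch; the root directory and key are placeholders:

import java.nio.file.Paths
import cats.effect.{Blocker, ContextShift, IO}
import cats.implicits._
import fs2.Stream
import blobstore.Path
import blobstore.fs.FileStore
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val roundTrip: IO[String] = Blocker[IO].use { blocker =>
  val store = FileStore[IO](Paths.get("/tmp/filestore-demo"), blocker)
  Stream.emits("hello".getBytes("UTF-8")).covary[IO].through(store.put(Path("docs/hello.txt"))).compile.drain >>
    store.get(Path("docs/hello.txt"), chunkSize = 4096).through(fs2.text.utf8Decode).compile.string
}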
Example 38
Source File: package.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{ContextShift, Sync, Blocker}
import fs2.{Pipe, Pull, Stream}
import cats.implicits._

package object blobstore {
  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F], CS: ContextShift[F]): Pull[F, Nothing, Unit] = {
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) => Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }
  }

  protected[blobstore] def bufferToDisk[F[_]](chunkSize: Int, blocker: Blocker)(implicit F: Sync[F], CS: ContextShift[F])
  : Pipe[F, Byte, (Long, Stream[F, Byte])] = {
    in => Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(
      p => F.delay(p.toFile.delete).void).flatMap { p =>
        in.through(fs2.io.file.writeAll(p, blocker)).drain ++
        Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

} 
Example 39
Source File: StoreOpsTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore

import java.nio.charset.Charset
import java.nio.file.Files
import java.util.concurrent.Executors

import cats.effect.{Blocker, IO}
import cats.effect.laws.util.TestInstances
import cats.implicits._
import fs2.Pipe
import org.scalatest.Assertion
import org.scalatest.flatspec.AnyFlatSpec
import implicits._
import org.scalatest.matchers.must.Matchers

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext


class StoreOpsTest extends AnyFlatSpec with Matchers with TestInstances {

  implicit val cs = IO.contextShift(ExecutionContext.global)
  val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))

  behavior of "PutOps"
  it should "buffer contents and compute size before calling Store.put" in {
    val bytes: Array[Byte] = "AAAAAAAAAA".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    fs2.Stream.emits(bytes).covary[IO].through(store.bufferedPut(Path("path/to/file.txt"), blocker)).compile.drain.unsafeRunSync()
    store.buf.toArray must be(bytes)

  }

  it should "upload a file from a nio Path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    fs2.Stream.bracket(IO(Files.createTempFile("test-file", ".bin"))) { p =>
      IO(p.toFile.delete).void
    }.flatMap { p =>
      fs2.Stream.emits(bytes).covary[IO].through(fs2.io.file.writeAll(p, blocker)).drain ++
        fs2.Stream.eval(store.put(p, Path("path/to/file.txt"), blocker))
    }.compile.drain.unsafeRunSync()
    store.buf.toArray must be(bytes)
  }

}

final case class DummyStore(check: Path => Assertion) extends Store[IO] {
  val buf = new ArrayBuffer[Byte]()
  override def put(path: Path): Pipe[IO, Byte, Unit] = {
    check(path)
    in => {
      buf.appendAll(in.compile.toVector.unsafeRunSync())
      fs2.Stream.emit(())
    }
  }
  override def list(path: Path): fs2.Stream[IO, Path] = ???
  override def get(path: Path, chunkSize: Int): fs2.Stream[IO, Byte] = ???
  override def move(src: Path, dst: Path): IO[Unit] = ???
  override def copy(src: Path, dst: Path): IO[Unit] = ???
  override def remove(path: Path): IO[Unit] = ???
} 
Example 40
Source File: package.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{Blocker, Concurrent, ContextShift, Resource, Sync}
import fs2.{Chunk, Hotswap, Pipe, Pull, RaiseThrowable, Stream}
import cats.implicits._

package object blobstore {
  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F],
    CS: ContextShift[F]
  ): Pull[F, Nothing, Unit] = {
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) =>
        Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }
  }

  protected[blobstore] def bufferToDisk[F[_]](
    chunkSize: Int,
    blocker: Blocker
  )(implicit F: Sync[F], CS: ContextShift[F]): Pipe[F, Byte, (Long, Stream[F, Byte])] = { in =>
    Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(p => F.delay(p.toFile.delete).void).flatMap {
      p =>
        in.through(fs2.io.file.writeAll(p, blocker)).drain ++
          Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

  private[blobstore] def putRotateBase[F[_]: Concurrent, T](
    limit: Long,
    openNewFile: Resource[F, T]
  )(consume: T => Chunk[Byte] => F[Unit]): Pipe[F, Byte, Unit] = { in =>
    Stream
      .resource(Hotswap(openNewFile))
      .flatMap {
        case (hotswap, newFile) =>
          goRotate(limit, 0L, in, newFile, hotswap, openNewFile)(
            consume = consumer => bytes => Pull.eval(consume(consumer)(bytes)).as(consumer),
            extract = Stream.emit
          ).stream
      }
  }

  private[blobstore] def goRotate[F[_]: RaiseThrowable, A, B](
    limit: Long,
    acc: Long,
    s: Stream[F, Byte],
    consumer: B,
    hotswap: Hotswap[F, A],
    resource: Resource[F, A]
  )(
    consume: B => Chunk[Byte] => Pull[F, Unit, B],
    extract: A => Stream[F, B]
  ): Pull[F, Unit, Unit] = {
    val toWrite = (limit - acc).min(Int.MaxValue.toLong).toInt
    s.pull.unconsLimit(toWrite).flatMap {
      case Some((hd, tl)) =>
        val newAcc = acc + hd.size
        consume(consumer)(hd).flatMap { consumer =>
          if (newAcc >= limit) {
            Pull
              .eval(hotswap.swap(resource))
              .flatMap(a => extract(a).pull.headOrError)
              .flatMap(nc => goRotate(limit, 0L, tl, nc, hotswap, resource)(consume, extract))
          } else {
            goRotate(limit, newAcc, tl, consumer, hotswap, resource)(consume, extract)
          }
        }
      case None => Pull.done
    }
  }

} 
Example 41
Source File: DbTransactor.scala    From zorechka-bot   with MIT License 5 votes vote down vote up
package com.wix.zorechka.repos

import cats.effect.Blocker
import com.wix.zorechka.DbConfig
import doobie.hikari.HikariTransactor
import doobie.util.ExecutionContexts
import zio.blocking.Blocking
import zio.{Managed, Task}
import zio.interop.catz._

object DbTransactor {

  def newMysqlTransactor(cfg: DbConfig)(implicit rt: zio.Runtime[Blocking]): Managed[Throwable, HikariTransactor[Task]] = {
      val res = for {
        connectEC  <- ExecutionContexts.fixedThreadPool[Task](2)
        transactEC <- ExecutionContexts.cachedThreadPool[Task]
        xa <- HikariTransactor.newHikariTransactor[Task](
          "com.mysql.jdbc.Driver",
          cfg.url, //s"jdbc:mysql://${dbConfig.host}:${config.port}/${config.mysql.schema}",
          cfg.username,
          cfg.password,
          connectEC,
          Blocker.liftExecutionContext(transactEC)
        )
      } yield xa

      res.toManaged
    }
} 
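Using the managed transactor from ZIO, as a sketch; the query and table name are illustrative only:

import doobie.implicits._
import zio.Task
import zio.blocking.Blocking
import zio.interop.catz._

def repoCount(cfg: DbConfig)(implicit rt: zio.Runtime[Blocking]): Task[Long] =
  DbTransactor.newMysqlTransactor(cfg).use { xa =>
    sql"select count(*) from repos".query[Long].unique.transact(xa)
  }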
Example 42
Source File: ProcessAlg.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, Concurrent, ContextShift, Timer}
import cats.implicits._
import io.chrisdavenport.log4cats.Logger
import org.scalasteward.core.application.Cli.EnvVar
import org.scalasteward.core.application.Config
import org.scalasteward.core.util.Nel

trait ProcessAlg[F[_]] {
  def exec(command: Nel[String], cwd: File, extraEnv: (String, String)*): F[List[String]]

  def execSandboxed(command: Nel[String], cwd: File): F[List[String]]
}

object ProcessAlg {
  abstract class UsingFirejail[F[_]](config: Config) extends ProcessAlg[F] {
    override def execSandboxed(command: Nel[String], cwd: File): F[List[String]] = {
      val envVars = config.envVars.map(EnvVar.unapply(_).get)
      if (config.disableSandbox)
        exec(command, cwd, envVars: _*)
      else {
        val whitelisted = (cwd.pathAsString :: config.whitelistedDirectories)
          .map(dir => s"--whitelist=$dir")
        val readOnly = config.readOnlyDirectories
          .map(dir => s"--read-only=$dir")
        exec(Nel("firejail", whitelisted ++ readOnly) ::: command, cwd, envVars: _*)
      }
    }
  }

  def create[F[_]](blocker: Blocker)(implicit
      config: Config,
      contextShift: ContextShift[F],
      logger: Logger[F],
      timer: Timer[F],
      F: Concurrent[F]
  ): ProcessAlg[F] =
    new UsingFirejail[F](config) {
      override def exec(
          command: Nel[String],
          cwd: File,
          extraEnv: (String, String)*
      ): F[List[String]] =
        logger.debug(s"Execute ${command.mkString_(" ")}") >>
          process.slurp[F](
            command,
            Some(cwd.toJava),
            extraEnv.toMap,
            config.processTimeout,
            logger.trace(_),
            blocker
          )
    }
} 
Example 43
Source File: ProcessAlgTest.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, IO}
import java.util.concurrent.Executors
import org.scalasteward.core.TestInstances._
import org.scalasteward.core.io.ProcessAlgTest.ioProcessAlg
import org.scalasteward.core.mock.MockContext._
import org.scalasteward.core.mock.MockState
import org.scalasteward.core.util.Nel
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

class ProcessAlgTest extends AnyFunSuite with Matchers {
  test("exec echo") {
    ioProcessAlg
      .exec(Nel.of("echo", "-n", "hello"), File.currentWorkingDirectory)
      .unsafeRunSync() shouldBe List("hello")
  }

  test("exec false") {
    ioProcessAlg
      .exec(Nel.of("ls", "--foo"), File.currentWorkingDirectory)
      .attempt
      .map(_.isLeft)
      .unsafeRunSync()
  }

  test("respect the disableSandbox setting") {
    val cfg = config.copy(disableSandbox = true)
    val processAlg = new MockProcessAlg()(cfg)

    val state = processAlg
      .execSandboxed(Nel.of("echo", "hello"), File.temp)
      .runS(MockState.empty)
      .unsafeRunSync()

    state shouldBe MockState.empty.copy(
      commands = Vector(
        List("TEST_VAR=GREAT", "ANOTHER_TEST_VAR=ALSO_GREAT", File.temp.toString, "echo", "hello")
      )
    )
  }

  test("execSandboxed echo") {
    val state = processAlg
      .execSandboxed(Nel.of("echo", "hello"), File.temp)
      .runS(MockState.empty)
      .unsafeRunSync()

    state shouldBe MockState.empty.copy(
      commands = Vector(
        List(
          "TEST_VAR=GREAT",
          "ANOTHER_TEST_VAR=ALSO_GREAT",
          File.temp.toString,
          "firejail",
          s"--whitelist=${File.temp}",
          "echo",
          "hello"
        )
      )
    )
  }
}

object ProcessAlgTest {
  val blocker: Blocker = Blocker.liftExecutorService(Executors.newCachedThreadPool())
  implicit val ioProcessAlg: ProcessAlg[IO] = ProcessAlg.create[IO](blocker)
} 
Example 44
Source File: JRFutureSpec.scala    From redis4cats   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.redis4cats.effect

import java.util.concurrent.CompletableFuture

import cats.effect.{ Blocker, ContextShift, IO }
import scala.concurrent.ExecutionContext
import munit.FunSuite

class JRFutureSpec extends FunSuite {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val currentThread: IO[String] = IO(Thread.currentThread().getName)

  test("it shifts back once the Future is converted") {
    val ioa =
      Blocker[IO].use { blocker =>
        JRFuture.fromCompletableFuture[IO, String] {
          IO {
            val jFuture = new CompletableFuture[String]()
            jFuture.complete("foo")
            jFuture
          }
        }(blocker)
      }

    (ioa *> currentThread)
      .flatMap(t => IO(assert(t.contains("scala-execution-context-global"))))
      .unsafeToFuture()
  }

  test("it shifts back even when the CompletableFuture fails") {
    val ioa =
      Blocker[IO].use { blocker =>
        JRFuture.fromCompletableFuture[IO, String] {
          IO {
            val jFuture = new CompletableFuture[String]()
            jFuture.completeExceptionally(new RuntimeException("Purposely fail"))
            jFuture
          }
        }(blocker)
      }

    (ioa.attempt *> currentThread)
      .flatMap(t => IO(assert(t.contains("scala-execution-context-global"))))
      .unsafeToFuture()
  }

} 
Example 45
Source File: DummyCpgProvider.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.cpg

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.jdk.CollectionConverters._
import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.codepropertygraph.generated.nodes.NewMethod
import io.shiftleft.cpgserver.query.{CpgOperationFailure, CpgOperationResult, CpgOperationSuccess}
import io.shiftleft.passes.{CpgPass, DiffGraph}
import io.shiftleft.semanticcpg.language._


class DummyCpgProvider(implicit cs: ContextShift[IO]) extends CpgProvider {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val cpgMap: Map[UUID, CpgOperationResult[Cpg]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[Cpg]].asScala

  private val uuidProvider = IO(UUID.randomUUID)

  private class MyPass(cpg: Cpg) extends CpgPass(cpg) {
    override def run(): Iterator[DiffGraph] = {
      implicit val diffGraph: DiffGraph.Builder = DiffGraph.newBuilder
      NewMethod(name = "main", isExternal = false).start.store
      Iterator(diffGraph.build())
    }
  }

  override def createCpg(filenames: Set[String]): IO[UUID] = {
    val cpg = new Cpg

    for {
      cpgId <- uuidProvider
      _ <- blocker
        .blockOn(IO(new MyPass(cpg).createAndApply()))
        .runAsync {
          case Right(_) => IO(cpgMap.put(cpgId, CpgOperationSuccess(cpg))).map(_ => ())
          case Left(ex) => IO(cpgMap.put(cpgId, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield cpgId
  }

  override def retrieveCpg(uuid: UUID): OptionT[IO, CpgOperationResult[Cpg]] = {
    OptionT.fromOption(cpgMap.get(uuid))
  }
} 
Example 46
Source File: ServerAmmoniteExecutor.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.query

import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.console.scripting.AmmoniteExecutor

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

abstract class ServerAmmoniteExecutor(implicit cs: ContextShift[IO]) extends AmmoniteExecutor {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val queryResultMap: Map[UUID, CpgOperationResult[String]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[String]].asScala

  private val uuidProvider = IO { UUID.randomUUID }

  def executeQuery(cpg: Cpg, query: String): IO[UUID] = {
    for {
      resultUuid <- uuidProvider
      _ <- blocker
        .blockOn(runQuery(query, cpg))
        .runAsync {
          case Right(result) => IO(queryResultMap.put(resultUuid, CpgOperationSuccess(result.toString))).map(_ => ())
          case Left(ex)      => IO(queryResultMap.put(resultUuid, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield resultUuid
  }

  def retrieveQueryResult(queryId: UUID): OptionT[IO, CpgOperationResult[String]] = {
    OptionT.fromOption(queryResultMap.get(queryId))
  }

  def executeQuerySync(cpg: Cpg, query: String): IO[CpgOperationResult[String]] = {
    for {
      result <- runQuery(query, cpg)
        .map(v => CpgOperationSuccess(v.toString))
        .handleErrorWith(err => IO(CpgOperationFailure(err)))
    } yield result
  }
} 
Example 47
Source File: SwaggerRoute.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.route

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.circe.generic.auto._
import io.circe.syntax._
import org.http4s._
import org.http4s.circe._
import org.http4s.dsl.io._
import org.http4s.headers.Location
import org.webjars.WebJarAssetLocator
import io.shiftleft.cpgserver.route.CpgRoute.ApiError

final class SwaggerRoute {

  private val blockingEc = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor)
  private val blocker = Blocker.liftExecutionContext(blockingEc)
  private implicit val blockingCs: ContextShift[IO] = IO.contextShift(blockingEc)

  private val swaggerUiVersion = IO { new WebJarAssetLocator().getWebJars.get("swagger-ui") }
  private val swaggerUiResources = swaggerUiVersion.map { ver =>
    s"/META-INF/resources/webjars/swagger-ui/$ver"
  }
  private val swaggerUiPath = Path("swagger-ui")

  val routes: HttpRoutes[IO] = HttpRoutes.of {
    case GET -> Root / ("swagger-ui" | "docs") =>
      PermanentRedirect(Location(Uri.unsafeFromString("swagger-ui/index.html")))

    // TODO discuss with jacob: according to scalac this is unreachable... commenting for now since it probably never worked anyway
    case req @ GET -> (Root | `swaggerUiPath`) / "swagger.yaml" =>
      StaticFile
        .fromResource("/swagger.yaml", blocker, Some(req))
        .getOrElseF(InternalServerError(ApiError("Swagger documentation is missing.").asJson))

    case req @ GET -> path if path.startsWith(swaggerUiPath) => {
      val file = path.toList.tail.mkString("/", "/", "") match {
        case f if f == "/index.html" =>
          StaticFile.fromResource[IO]("/swagger-ui/index.html", blocker, Some(req))
        case f =>
          OptionT.liftF(swaggerUiResources).flatMap { resources =>
            StaticFile.fromResource[IO](resources + f, blocker, Some(req))
          }
      }
      file.getOrElseF(InternalServerError(ApiError(s"Requested file [$file] is missing.").asJson))
    }
  }
}

object SwaggerRoute {
  def apply(): SwaggerRoute =
    new SwaggerRoute
} 
Example 48
Source File: ZIOAutoAckConsumer.scala    From fs2-rabbit   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.fs2rabbit.examples

import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig
import dev.profunktor.fs2rabbit.interpreter.RabbitClient

import cats.effect.{Blocker, Resource}
import zio._
import zio.interop.catz._
import zio.interop.catz.implicits._
import dev.profunktor.fs2rabbit.resiliency.ResilientStream
import java.util.concurrent.Executors

object ZIOAutoAckConsumer extends CatsApp {

  val config = Fs2RabbitConfig(
    virtualHost = "/",
    host = "127.0.0.1",
    username = Some("guest"),
    password = Some("guest"),
    port = 5672,
    ssl = false,
    connectionTimeout = 3,
    requeueOnNack = false,
    requeueOnReject = false,
    internalQueueSize = Some(500)
  )

  val blockerResource =
    Resource
      .make(Task(Executors.newCachedThreadPool()))(es => Task(es.shutdown()))
      .map(Blocker.liftExecutorService)

  override def run(args: List[String]): UIO[Int] =
    blockerResource
      .use { blocker =>
        RabbitClient[Task](config, blocker).flatMap { client =>
          ResilientStream
            .runF(new AutoAckConsumerDemo[Task](client).program)
        }
      }
      .run
      .map(_ => 0)

} 
Example 49
Source File: Publish.scala    From fs2-rabbit   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.fs2rabbit.algebra

import cats.effect.syntax.effect._
import cats.effect.{Blocker, ContextShift, Effect, Sync}
import cats.syntax.functor._
import com.rabbitmq.client.{AMQP, ReturnListener}
import dev.profunktor.fs2rabbit.model._

object Publish {
  def make[F[_]: Effect: ContextShift](
      blocker: Blocker
  ): Publish[F] =
    new Publish[F] {
      override def basicPublish(channel: AMQPChannel,
                                exchangeName: ExchangeName,
                                routingKey: RoutingKey,
                                msg: AmqpMessage[Array[Byte]]): F[Unit] =
        blocker.delay {
          channel.value.basicPublish(
            exchangeName.value,
            routingKey.value,
            msg.properties.asBasicProps,
            msg.payload
          )
        }

      override def basicPublishWithFlag(channel: AMQPChannel,
                                        exchangeName: ExchangeName,
                                        routingKey: RoutingKey,
                                        flag: PublishingFlag,
                                        msg: AmqpMessage[Array[Byte]]): F[Unit] =
        blocker.delay {
          channel.value.basicPublish(
            exchangeName.value,
            routingKey.value,
            flag.mandatory,
            msg.properties.asBasicProps,
            msg.payload
          )
        }

      override def addPublishingListener(
          channel: AMQPChannel,
          listener: PublishReturn => F[Unit]
      ): F[Unit] =
        Sync[F].delay {
          val returnListener = new ReturnListener {
            override def handleReturn(replyCode: Int,
                                      replyText: String,
                                      exchange: String,
                                      routingKey: String,
                                      properties: AMQP.BasicProperties,
                                      body: Array[Byte]): Unit = {
              val publishReturn =
                PublishReturn(
                  ReplyCode(replyCode),
                  ReplyText(replyText),
                  ExchangeName(exchange),
                  RoutingKey(routingKey),
                  AmqpProperties.unsafeFrom(properties),
                  AmqpBody(body)
                )

              listener(publishReturn).toIO.unsafeRunAsync(_ => ())
            }
          }

          channel.value.addReturnListener(returnListener)
        }.void

      override def clearPublishingListeners(channel: AMQPChannel): F[Unit] =
        Sync[F].delay {
          channel.value.clearReturnListeners()
        }.void
    }
}

trait Publish[F[_]] {
  def basicPublish(channel: AMQPChannel,
                   exchangeName: ExchangeName,
                   routingKey: RoutingKey,
                   msg: AmqpMessage[Array[Byte]]): F[Unit]
  def basicPublishWithFlag(channel: AMQPChannel,
                           exchangeName: ExchangeName,
                           routingKey: RoutingKey,
                           flag: PublishingFlag,
                           msg: AmqpMessage[Array[Byte]]): F[Unit]
  def addPublishingListener(channel: AMQPChannel, listener: PublishReturn => F[Unit]): F[Unit]
  def clearPublishingListeners(channel: AMQPChannel): F[Unit]
} 
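The publisher above leans on Blocker#delay, which evaluates a thunk on the blocking pool and shifts back to the main pool afterwards; the same pattern in isolation:

import cats.effect.{Blocker, ContextShift, IO}

def readLineBlocking(blocker: Blocker)(implicit cs: ContextShift[IO]): IO[String] =
  blocker.delay[IO, String](scala.io.StdIn.readLine())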
Example 50
Source File: TextExtract.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.extract.ocr

import cats.effect.{Blocker, ContextShift, Sync}
import fs2.Stream

import docspell.common._
import docspell.extract.internal.Text
import docspell.files._

object TextExtract {

  def extract[F[_]: Sync: ContextShift](
      in: Stream[F, Byte],
      blocker: Blocker,
      logger: Logger[F],
      lang: String,
      config: OcrConfig
  ): Stream[F, Text] =
    extractOCR(in, blocker, logger, lang, config)

  def extractOCR[F[_]: Sync: ContextShift](
      in: Stream[F, Byte],
      blocker: Blocker,
      logger: Logger[F],
      lang: String,
      config: OcrConfig
  ): Stream[F, Text] =
    Stream
      .eval(TikaMimetype.detect(in, MimeTypeHint.none))
      .flatMap({
        case MimeType.pdf =>
          Stream.eval(Ocr.extractPdf(in, blocker, logger, lang, config)).unNoneTerminate

        case mt if mt.primary == "image" =>
          Ocr.extractImage(in, blocker, logger, lang, config)

        case mt =>
          raiseError(s"File `$mt` not supported")
      })
      .map(Text.apply)

  private def raiseError[F[_]: Sync](msg: String): Stream[F, Nothing] =
    Stream.raiseError[F](new Exception(msg))
} 
Example 51
Source File: TestFiles.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.files

import cats.effect.{Blocker, IO}
import fs2.Stream

import scala.concurrent.ExecutionContext

object TestFiles {
  val blocker     = Blocker.liftExecutionContext(ExecutionContext.global)
  implicit val CS = IO.contextShift(ExecutionContext.global)

  val letterSourceDE: Stream[IO, Byte] =
    ExampleFiles.letter_de_pdf
      .readURL[IO](8 * 1024, blocker)

  val letterSourceEN: Stream[IO, Byte] =
    ExampleFiles.letter_en_pdf
      .readURL[IO](8 * 1024, blocker)

  lazy val letterDEText =
    ExampleFiles.letter_de_txt
      .readText[IO](8 * 1024, blocker)
      .unsafeRunSync

  lazy val letterENText =
    ExampleFiles.letter_en_txt
      .readText[IO](8 * 1024, blocker)
      .unsafeRunSync
} 
Example 52
Source File: Playing.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.files

import cats.effect.{Blocker, ExitCode, IO, IOApp}
import docspell.common.MimeTypeHint

import scala.concurrent.ExecutionContext

object Playing extends IOApp {
  val blocker = Blocker.liftExecutionContext(ExecutionContext.global)

  def run(args: List[String]): IO[ExitCode] =
    IO {
      //val ods = ExampleFiles.examples_sample_ods.readURL[IO](8192, blocker)
      //val odt = ExampleFiles.examples_sample_odt.readURL[IO](8192, blocker)
      val rtf = ExampleFiles.examples_sample_rtf.readURL[IO](8192, blocker)

      val x = for {
        odsm1 <-
          TikaMimetype
            .detect(
              rtf,
              MimeTypeHint.filename(ExampleFiles.examples_sample_rtf.path.segments.last)
            )
        odsm2 <- TikaMimetype.detect(rtf, MimeTypeHint.none)
      } yield (odsm1, odsm2)
      println(x.unsafeRunSync())
      ExitCode.Success
    }
} 
Example 53
Source File: ImageSizeTest.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.files

import cats.implicits._
import cats.effect.{Blocker, IO}
import minitest.SimpleTestSuite

import scala.concurrent.ExecutionContext
import scala.util.Using

object ImageSizeTest extends SimpleTestSuite {
  val blocker     = Blocker.liftExecutionContext(ExecutionContext.global)
  implicit val CS = IO.contextShift(ExecutionContext.global)

  //tiff files are not supported on the jdk by default
  //requires an external library
  val files = List(
    ExampleFiles.camera_letter_en_jpg -> Dimension(1695, 2378),
    ExampleFiles.camera_letter_en_png -> Dimension(1695, 2378),
//    ExampleFiles.camera_letter_en_tiff -> Dimension(1695, 2378),
    ExampleFiles.scanner_jfif_jpg    -> Dimension(2480, 3514),
    ExampleFiles.bombs_20K_gray_jpeg -> Dimension(20000, 20000),
    ExampleFiles.bombs_20K_gray_png  -> Dimension(20000, 20000),
    ExampleFiles.bombs_20K_rgb_jpeg  -> Dimension(20000, 20000),
    ExampleFiles.bombs_20K_rgb_png   -> Dimension(20000, 20000)
  )

  test("get sizes from input-stream") {
    files.foreach {
      case (uri, expect) =>
        val url = uri.toJavaUrl.fold(sys.error, identity)
        Using.resource(url.openStream()) { in =>
          val dim = ImageSize.get(in)
          assertEquals(dim, expect.some)
        }
    }
  }

  test("get sizes from stream") {
    files.foreach {
      case (uri, expect) =>
        val stream = uri.readURL[IO](8192, blocker)
        val dim    = ImageSize.get(stream).unsafeRunSync()
        assertEquals(dim, expect.some)
    }
  }
}