cats.effect.Async Scala Examples

The following examples show how to use cats.effect.Async in open-source Scala projects. Each example lists the source file, the project it comes from, and that project's license.
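At its core, Async[F] describes effect types F[_] that can suspend callback-based computations: F.async registers a callback with some asynchronous API and yields an F[A] that completes when that callback fires. Most of the examples below are variations on this pattern. Here is a minimal sketch of it (legacyFetch is a made-up stand-in for a callback-based Java driver or JavaScript facade):

import cats.effect.{Async, IO}

// A stand-in for a callback-based API that completes immediately.
def legacyFetch(key: String)(onResult: Either[Throwable, String] => Unit): Unit =
  onResult(Right(s"value-for-$key"))

// Suspends the callback API into any F[_] with an Async instance.
def fetch[F[_]](key: String)(implicit F: Async[F]): F[String] =
  F.async(cb => legacyFetch(key)(cb))

// Interpreted with a concrete effect type such as cats.effect.IO:
val result: IO[String] = fetch[IO]("user-42")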
Example 1
Source File: AirlinesModule.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.airlines

import cats.effect.Async
import com.smartbackpackerapp.airlines.parser.{AirlineFile, AirlinesFileParser, AllowanceFile}
import com.smartbackpackerapp.airlines.sql.AirlinesInsertData
import doobie.util.transactor.Transactor

class AirlinesModule[F[_] : Async] {

  val devDbUrl: String  = sys.env.getOrElse("JDBC_DATABASE_URL", "")
  val dbUrl: String     = sys.env.getOrElse("SB_DB_URL", "jdbc:postgresql:sb")

  private val dbDriver  = sys.env.getOrElse("SB_DB_DRIVER", "org.postgresql.Driver")
  private val dbUser    = sys.env.getOrElse("SB_DB_USER", "postgres")
  private val dbPass    = sys.env.getOrElse("SB_DB_PASSWORD", "")

  private val xa = {
    if (devDbUrl.nonEmpty) Transactor.fromDriverManager[F](dbDriver, devDbUrl)
    else Transactor.fromDriverManager[F](dbDriver, dbUrl, dbUser, dbPass)
  }

  def airlinesInsertData(airlineFile: AirlineFile,
                         allowanceFile: AllowanceFile): AirlinesInsertData[F] = {
    val parser = AirlinesFileParser[F](airlineFile, allowanceFile)
    new AirlinesInsertData[F](xa, parser)
  }

} 
Example 2
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M])(implicit F: Async[F],
                                          materializer: Materializer): Process[F] =
      Process(run = F.delay {
        val (killSwitch, terminated) = source
          .viaMat(KillSwitches.single)(Keep.right)
          .toMat(Sink.ignore)(Keep.both)
          .run()
        RunningProcess(F.fromFuture(terminated).void, F.delay(killSwitch.shutdown()))
      })
  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
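A brief usage sketch for the builder above, assuming a classic (pre-2.6) Akka setup with an explicit materializer; the source and system name are illustrative:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.stream.scaladsl.Source
import cats.effect.IO

implicit val system: ActorSystem = ActorSystem("example")
implicit val mat: Materializer   = ActorMaterializer()

// Nothing runs yet: the stream is only materialized when the returned
// Process is started, and the kill switch lets it be shut down.
val process = AkkaStreamProcess[IO].apply(Source.repeat(()).take(100))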
Example 3
Source File: effect.scala    From aecor   with MIT License
package aecor.util

import cats.effect.{ Async, Effect, IO }

import scala.concurrent.{ Future, Promise }

object effect {
  implicit final class AecorEffectOps[F[_], A](val self: F[A]) extends AnyVal {
    @inline final def unsafeToFuture()(implicit F: Effect[F]): Future[A] = {
      val p = Promise[A]()
      F.runAsync(self) {
          case Right(a) => IO { p.success(a); () }
          case Left(e)  => IO { p.failure(e); () }
        }
        .unsafeRunSync()
      p.future
    }
  }

  implicit final class AecorLiftIOOps[F[_]](val self: Async[F]) extends AnyVal {
    def fromFuture[A](future: => Future[A]): F[A] =
      IO.fromFuture(IO(future))(IO.contextShift(scala.concurrent.ExecutionContext.global)).to(self)
  }
} 
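A short sketch of how these extension methods might be used (the values are illustrative):

import cats.effect.{Async, Effect, IO}
import scala.concurrent.Future
import aecor.util.effect._

// Runs any F[A] with an Effect instance as a Future.
def toFuture[F[_]: Effect](fa: F[Int]): Future[Int] = fa.unsafeToFuture()

// Lifts a by-name Future into F through its Async instance.
val lifted: IO[String] = Async[IO].fromFuture(Future.successful("done"))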
Example 4
Source File: Session.scala    From aecor   with MIT License
package akka.persistence.cassandra
import java.util.concurrent.Executor

import cats.data.Kleisli
import cats.effect.{ Async, ContextShift }
import com.datastax.driver.core.{ ResultSet, TypeCodec, Session => DatastaxSession }

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

trait Session[F[_]] {
  def execute(query: String): F[ResultSet]
  def registerCodec[A](codec: TypeCodec[A]): F[Unit]
}

object Session {
  type Init[F[_]] = Kleisli[F, Session[F], Unit]
  def Init[F[_]](f: Session[F] => F[Unit]): Init[F] = Kleisli(f)
  private val immediateExecutor = new Executor {
    override def execute(command: Runnable): Unit =
      command.run()
  }

  private val immediateExecutionContext = ExecutionContext.fromExecutor(immediateExecutor)

  def apply[F[_]](datastaxSession: DatastaxSession)(implicit F: Async[F],
                                                    contextShift: ContextShift[F]): Session[F] =
    new Session[F] {
      final override def execute(query: String): F[ResultSet] =
        contextShift.evalOn(immediateExecutionContext) {
          F.async { cb =>
            val future = datastaxSession.executeAsync(query)
            val runnable = new Runnable {
              override def run(): Unit =
                try {
                  cb(Right(future.get()))
                } catch {
                  case NonFatal(e) =>
                    cb(Left(e))
                }
            }
            future.addListener(runnable, immediateExecutor)
          }
        }
      override def registerCodec[A](codec: TypeCodec[A]): F[Unit] =
        F.delay {
          datastaxSession.getCluster.getConfiguration.getCodecRegistry.register(codec)
          ()
        }
    }
} 
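A usage sketch: wrap an already-connected driver session and run a query (the query text is illustrative):

import cats.effect.{ContextShift, IO}
import com.datastax.driver.core.{ResultSet, Session => DatastaxSession}

// Requires an implicit ContextShift[IO]; Async[IO] is always available.
def selectAll(underlying: DatastaxSession)(implicit cs: ContextShift[IO]): IO[ResultSet] =
  Session[IO](underlying).execute("SELECT * FROM users")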
Example 5
Source File: Execute.scala    From tofu   with Apache License 2.0
package tofu

import cats.effect.{Async, ContextShift}

import tofu.syntax.monadic._
import scala.concurrent.{ExecutionContext, Future}

import simulacrum.typeclass

@typeclass
trait Execute[F[_]] {
  def executionContext: F[ExecutionContext]

  def deferFutureAction[A](f: ExecutionContext => Future[A]): F[A]

  def deferFuture[A](f: => Future[A]): F[A] = deferFutureAction(_ => f)
}

object Execute {
  final implicit def asyncExecute[F[_]](implicit
      ec: ExecutionContext,
      cs: ContextShift[F],
      asyncF: Async[F]
  ): Execute[F] = new Execute[F] {
    def executionContext: F[ExecutionContext]                        = ec.pure[F]
    def deferFutureAction[A](f: ExecutionContext => Future[A]): F[A] =
      Async.fromFuture(asyncF.delay(f(ec)))
  }
} 
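With an ExecutionContext and a ContextShift[F] in scope, the derived instance lets Future-based code be deferred into F. A sketch (fetchUser is illustrative):

import cats.effect.{ContextShift, IO}
import scala.concurrent.{ExecutionContext, Future}

implicit val ec: ExecutionContext = ExecutionContext.global
implicit val cs: ContextShift[IO] = IO.contextShift(ec)

def fetchUser(id: Long): Future[String] = Future.successful(s"user-$id")

// The Future is not created until the IO is run.
val io: IO[String] = Execute[IO].deferFuture(fetchUser(42L))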
Example 6
Source File: ProducerImpl.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.producer

import scala.collection.compat._
import cats.implicits._
import cats.effect.Async
import java.util.concurrent.{Future => JFuture}
import scala.jdk.CollectionConverters._
import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

case class ProducerImpl[F[_], K, V](p: Producer[K, V])(implicit F: Async[F])
    extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = F.delay(p.abortTransaction())
  def beginTransaction: F[Unit] = F.delay(p.beginTransaction())
  def close: F[Unit] = F.delay(p.close())
  def close(timeout: FiniteDuration): F[Unit] =
    F.delay(p.close(java.time.Duration.ofMillis(timeout.toMillis)))
  def commitTransaction: F[Unit] = F.delay(p.commitTransaction())
  def flush: F[Unit] = F.delay(p.flush())
  def initTransactions: F[Unit] = F.delay(p.initTransactions())
  def metrics: F[Map[MetricName, Metric]] = F.delay(p.metrics().asScala.toMap)
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    F.delay(p.partitionsFor(topic).asScala.toSeq)
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] =
    F.delay(p.sendOffsetsToTransaction(offsets.asJava, consumerGroupId))

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.send(record)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.send(record, callback)

  
  // NOTE: ProducerApi's Either-based sendRaw overload and its scaladoc were
  // elided in this listing; a plausible reconstruction (adapting Kafka's Java
  // Callback) is shown here, since sendAsync and Example 7 rely on it.
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Either[Exception, RecordMetadata] => Unit
  ): Unit = {
    sendRaw(record, new Callback {
      override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
        if (exception == null) callback(Right(metadata)) else callback(Left(exception))
    })
    ()
  }

  // Completes once the broker acknowledges (or rejects) the record.
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    F.async(sendRaw(record, _))
}

object ProducerImpl {
  //returns the type expected when creating a Resource
  def create[F[_]: Async, K, V](
      p: Producer[K, V]
  ): ProducerApi[F, K, V] =
    ProducerImpl(p)
} 
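A usage sketch for the wrapper (producer configuration and serializers elided; the topic and record are illustrative):

import cats.effect.IO
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata}

// Sends a record and semantically blocks the fiber (no thread is blocked)
// until the broker acknowledges it.
def send(p: KafkaProducer[String, String]): IO[RecordMetadata] =
  ProducerImpl
    .create[IO, String, String](p)
    .sendAsync(new ProducerRecord("topic", "key", "value"))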
Example 7
Source File: ShiftingProducerImpl.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.producer

import cats.effect.{Async, ContextShift}
import java.util.concurrent.{Future => JFuture}

import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

import scala.concurrent.ExecutionContext

case class ShiftingProducerImpl[F[_]: Async, K, V](
    p: ProducerApi[F, K, V],
    blockingContext: ExecutionContext
)(implicit CS: ContextShift[F])
    extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = CS.evalOn(blockingContext)(p.abortTransaction)
  def beginTransaction: F[Unit] = CS.evalOn(blockingContext)(p.beginTransaction)
  def close: F[Unit] = CS.evalOn(blockingContext)(p.close)
  def close(timeout: FiniteDuration): F[Unit] = CS.evalOn(blockingContext)(p.close(timeout))
  def commitTransaction: F[Unit] = CS.evalOn(blockingContext)(p.commitTransaction)
  def flush: F[Unit] = CS.evalOn(blockingContext)(p.flush)
  def initTransactions: F[Unit] = CS.evalOn(blockingContext)(p.initTransactions)
  def metrics: F[Map[MetricName, Metric]] = CS.evalOn(blockingContext)(p.metrics)
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(p.partitionsFor(topic))
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] =
    CS.evalOn(blockingContext)(p.sendOffsetsToTransaction(offsets, consumerGroupId))

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.sendRaw(record)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.sendRaw(record, callback)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Either[Exception, RecordMetadata] => Unit
  ): Unit = p.sendRaw(record, callback)

  def sendAndForget(record: ProducerRecord[K, V]): F[Unit] =
    CS.evalOn(blockingContext)(p.sendAndForget(record))
  def sendSync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendSync(record))
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendAsync(record))
} 
Example 8
Source File: ShiftingConsumerImpl.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.consumer

import cats.effect.{Async, ContextShift}
import java.util.regex.Pattern

import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer._

import scala.concurrent.ExecutionContext

case class ShiftingConsumerImpl[F[_]: Async, K, V](
    c: ConsumerApi[F, K, V],
    blockingContext: ExecutionContext
)(implicit CS: ContextShift[F])
    extends ConsumerApi[F, K, V] {
  def assign(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.assign(partitions))
  def assignment: F[Set[TopicPartition]] = CS.evalOn(blockingContext)(c.assignment)
  def beginningOffsets(partitions: Iterable[TopicPartition]): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.beginningOffsets(partitions))
  def beginningOffsets(
      partitions: Iterable[TopicPartition],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.beginningOffsets(partitions, timeout))
  def close: F[Unit] = CS.evalOn(blockingContext)(c.close)
  def close(timeout: FiniteDuration): F[Unit] = CS.evalOn(blockingContext)(c.close(timeout))
  def commitAsync: F[Unit] = CS.evalOn(blockingContext)(c.commitAsync)
  def commitAsync(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      callback: OffsetCommitCallback
  ): F[Unit] =
    CS.evalOn(blockingContext)(c.commitAsync(offsets, callback))
  def commitAsync(callback: OffsetCommitCallback): F[Unit] =
    CS.evalOn(blockingContext)(c.commitAsync(callback))
  def commitSync: F[Unit] = CS.evalOn(blockingContext)(c.commitSync)
  def commitSync(offsets: Map[TopicPartition, OffsetAndMetadata]): F[Unit] =
    CS.evalOn(blockingContext)(c.commitSync(offsets))
  def committed(partition: Set[TopicPartition]): F[Map[TopicPartition, OffsetAndMetadata]] =
    CS.evalOn(blockingContext)(c.committed(partition))
  def endOffsets(partitions: Iterable[TopicPartition]): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.endOffsets(partitions))
  def endOffsets(
      partitions: Iterable[TopicPartition],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, Long]] =
    CS.evalOn(blockingContext)(c.endOffsets(partitions, timeout))
  def listTopics: F[Map[String, Seq[PartitionInfo]]] = CS.evalOn(blockingContext)(c.listTopics)
  def listTopics(timeout: FiniteDuration): F[Map[String, Seq[PartitionInfo]]] =
    CS.evalOn(blockingContext)(c.listTopics(timeout))
  def metrics: F[Map[MetricName, Metric]] = CS.evalOn(blockingContext)(c.metrics)
  def offsetsForTimes(
      timestampsToSearch: Map[TopicPartition, Long]
  ): F[Map[TopicPartition, OffsetAndTimestamp]] =
    CS.evalOn(blockingContext)(c.offsetsForTimes(timestampsToSearch))
  def offsetsForTimes(
      timestampsToSearch: Map[TopicPartition, Long],
      timeout: FiniteDuration
  ): F[Map[TopicPartition, OffsetAndTimestamp]] =
    CS.evalOn(blockingContext)(c.offsetsForTimes(timestampsToSearch, timeout))
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(c.partitionsFor(topic))
  def partitionsFor(topic: String, timeout: FiniteDuration): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(c.partitionsFor(topic, timeout))
  def pause(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.pause(partitions))
  def paused: F[Set[TopicPartition]] = CS.evalOn(blockingContext)(c.paused)
  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    CS.evalOn(blockingContext)(c.poll(timeout))
  def position(partition: TopicPartition): F[Long] =
    CS.evalOn(blockingContext)(c.position(partition))
  def resume(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.resume(partitions))
  def seek(partition: TopicPartition, offset: Long): F[Unit] =
    CS.evalOn(blockingContext)(c.seek(partition, offset))
  def seekToBeginning(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.seekToBeginning(partitions))
  def seekToEnd(partitions: Iterable[TopicPartition]): F[Unit] =
    CS.evalOn(blockingContext)(c.seekToEnd(partitions))
  def subscribe(topics: Iterable[String]): F[Unit] = CS.evalOn(blockingContext)(c.subscribe(topics))
  def subscribe(topics: Iterable[String], callback: ConsumerRebalanceListener): F[Unit] =
    CS.evalOn(blockingContext)(c.subscribe(topics, callback))
  def subscribe(pattern: Pattern): F[Unit] = CS.evalOn(blockingContext)(c.subscribe(pattern))
  def subscribe(pattern: Pattern, callback: ConsumerRebalanceListener): F[Unit] =
    CS.evalOn(blockingContext)(c.subscribe(pattern, callback))
  def subscription: F[Set[String]] = CS.evalOn(blockingContext)(c.subscription)
  def unsubscribe: F[Unit] = CS.evalOn(blockingContext)(c.unsubscribe)
  def wakeup: F[Unit] = c.wakeup //TODO wakeup is the one method that is thread-safe, right?
}

object ShiftingConsumerImpl {
  //returns the type expected when creating a Resource
  def create[F[_]: Async: ContextShift, K, V](
      c: ConsumerApi[F, K, V],
      e: ExecutionContext
  ): ConsumerApi[F, K, V] =
    ShiftingConsumerImpl(c, e)
} 
Example 9
Source File: DatabaseConfig.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore.config

import cats.syntax.functor._
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import doobie.hikari.HikariTransactor
import org.flywaydb.core.Flyway

import scala.concurrent.ExecutionContext

case class DatabaseConnectionsConfig(poolSize: Int)
case class DatabaseConfig(
    url: String,
    driver: String,
    user: String,
    password: String,
    connections: DatabaseConnectionsConfig,
)

object DatabaseConfig {
  def dbTransactor[F[_]: Async: ContextShift](
      dbc: DatabaseConfig,
      connEc: ExecutionContext,
      blocker: Blocker,
  ): Resource[F, HikariTransactor[F]] =
    HikariTransactor
      .newHikariTransactor[F](dbc.driver, dbc.url, dbc.user, dbc.password, connEc, blocker)

  
  // Runs the bundled Flyway migrations against the configured database.
  def initializeDb[F[_]](cfg: DatabaseConfig)(implicit S: Sync[F]): F[Unit] =
    S.delay {
        val fw: Flyway = {
          Flyway
            .configure()
            .dataSource(cfg.url, cfg.user, cfg.password)
            .load()
        }
        fw.migrate()
      }
      .as(())
} 
Example 10
Source File: package.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore
package infrastructure.repository

import cats.implicits._
import cats.effect.{Async, ContextShift, Effect, IO}
import config._
import _root_.doobie.Transactor
import io.circe.config.parser

import scala.concurrent.ExecutionContext

package object doobie {
  def getTransactor[F[_]: Async: ContextShift](cfg: DatabaseConfig): Transactor[F] =
    Transactor.fromDriverManager[F](
      cfg.driver, // driver classname
      cfg.url, // connect URL (driver-specific)
      cfg.user, // user
      cfg.password, // password
    )

  
  // Loads the pet store config, runs the schema migrations, and yields a Transactor.
  def initializedTransactor[F[_]: Effect: Async: ContextShift]: F[Transactor[F]] =
    for {
      petConfig <- parser.decodePathF[F, PetStoreConfig]("petstore")
      _ <- DatabaseConfig.initializeDb(petConfig.db)
    } yield getTransactor(petConfig.db)

  lazy val testEc = ExecutionContext.Implicits.global

  implicit lazy val testCs = IO.contextShift(testEc)

  lazy val testTransactor = initializedTransactor[IO].unsafeRunSync()
} 
Example 11
Source File: CatsImplicitsSpec.scala    From neotypes   with MIT License
package neotypes.cats.effect

import cats.{Applicative, Monad}
import cats.effect.{Async, IO, Resource}
import cats.effect.implicits._
import cats.implicits._
import neotypes.{BaseIntegrationSpec, Driver, Session}
import neotypes.cats.effect.implicits._
import neotypes.implicits.all._
import org.neo4j.driver.v1.exceptions.ClientException


final class CatsImplicitsSpec extends BaseIntegrationSpec[IO](IOTestkit) {
  it should "work with cats implicits and neotypes implicits" in {
    def test1[F[_]: Applicative]: F[Unit] = Applicative[F].unit
    def test2[F[_]: Monad]: F[Unit] = ().pure[F]

    def makeSession[F[_]: Async]: Resource[F, Session[F]] =
      Resource
        .make(Async[F].delay(new Driver[F](this.driver)))(_.close)
        .flatMap(_.session)

    def useSession[F[_]: Async]: F[String] = makeSession[F].use { s =>
      (test1[F] *> test2[F]).flatMap { _ =>
        """match (p:Person {name: "Charlize Theron"}) return p.name"""
          .query[String]
          .single(s)
      }
    }

    useSession[IO].unsafeToFuture().map {
      name => assert(name == "Charlize Theron")
    }
  }

  override val initQuery: String = BaseIntegrationSpec.DEFAULT_INIT_QUERY
} 
Example 12
Source File: KafkaConsumer.scala    From aecor   with MIT License
package aecor.kafkadistributedprocessing.internal

import java.time.Duration
import java.util.Properties
import java.util.concurrent.Executors

import cats.effect.{ Async, ContextShift, Resource }
import cats.~>
import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords }
import org.apache.kafka.common.PartitionInfo
import org.apache.kafka.common.serialization.Deserializer

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V](
  withConsumer: (Consumer[K, V] => *) ~> F
) {

  def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] =
    withConsumer(_.subscribe(topics.asJava, listener))

  def subscribe(topics: Set[String]): F[Unit] =
    withConsumer(_.subscribe(topics.asJava))

  val unsubscribe: F[Unit] =
    withConsumer(_.unsubscribe())

  def partitionsFor(topic: String): F[Set[PartitionInfo]] =
    withConsumer(_.partitionsFor(topic).asScala.toSet)

  def close: F[Unit] =
    withConsumer(_.close())

  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    withConsumer(_.poll(Duration.ofNanos(timeout.toNanos)))
}

private[kafkadistributedprocessing] object KafkaConsumer {
  final class Create[F[_]] {
    def apply[K, V](
      config: Properties,
      keyDeserializer: Deserializer[K],
      valueDeserializer: Deserializer[V]
    )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = {
      val create = F.suspend {

        val executor = Executors.newSingleThreadExecutor()

        def eval[A](a: => A): F[A] =
          contextShift.evalOn(ExecutionContext.fromExecutor(executor)) {
            F.async[A] { cb =>
              executor.execute(new Runnable {
                override def run(): Unit =
                  cb {
                    try Right(a)
                    catch {
                      case e: Throwable => Left(e)
                    }
                  }
              })
            }
          }

        eval {
          val original = Thread.currentThread.getContextClassLoader
          Thread.currentThread.setContextClassLoader(null)
          val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V](
            config,
            keyDeserializer,
            valueDeserializer
          )
          Thread.currentThread.setContextClassLoader(original)
          val withConsumer = new ((Consumer[K, V] => *) ~> F) {
            def apply[A](f: Consumer[K, V] => A): F[A] =
              eval(f(consumer))
          }
          new KafkaConsumer[F, K, V](withConsumer)
        }
      }
      Resource.make(create)(_.close)
    }
  }
  def create[F[_]]: Create[F] = new Create[F]
} 
Example 13
Source File: AirlinesInsertData.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.airlines.sql

import cats.effect.Async
import cats.instances.list._
import com.smartbackpackerapp.airlines.parser.AirlinesFileParser
import com.smartbackpackerapp.common.Log
import com.smartbackpackerapp.model.{Airline, BaggagePolicy}
import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.util.transactor.Transactor
import doobie.util.update.{Update, Update0}
import fs2.Stream

class AirlinesInsertData[F[_] : Async](xa: Transactor[F],
                                       airlinesParser: AirlinesFileParser[F])
                                      (implicit L: Log[F]) {

  import AirlineInsertStatement._

  private def program(airline: Airline): ConnectionIO[Unit] =
    for {
      airlineId <- insertAirline(airline.name.value).withUniqueGeneratedKeys[Int]("airline_id")
      policyId  <- insertBaggagePolicy(airlineId, airline.baggagePolicy).withUniqueGeneratedKeys[Int]("policy_id")
      _         <- insertManyBaggageAllowance(policyId).updateMany(airline.baggagePolicy.allowance.toDTO(policyId))
    } yield ()

  def run: Stream[F, Unit] =
    for {
      a <- airlinesParser.airlines
      _ <- Stream.eval(L.info(s"Persisting: $a"))
      _ <- Stream.eval(program(a).transact(xa))
    } yield ()

}

object AirlineInsertStatement {

  def insertAirline(name: String): Update0 = {
    sql"INSERT INTO airline (name) VALUES ($name)"
      .update
  }

  def insertBaggagePolicy(airlineId: Int,
                          baggagePolicy: BaggagePolicy): Update0 = {
    sql"INSERT INTO baggage_policy (airline_id, extra, website) VALUES ($airlineId, ${baggagePolicy.extra}, ${baggagePolicy.website})"
      .update
  }

  def insertManyBaggageAllowance(policyId: Int): Update[CreateBaggageAllowanceDTO] = {
    val sql = "INSERT INTO baggage_allowance (policy_id, baggage_type, kgs, height, width, depth) VALUES (?, ?, ?, ?, ?, ?)"
    Update[CreateBaggageAllowanceDTO](sql)
  }

} 
Example 14
Source File: CountryInsertData.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.scraper.sql

import cats.effect.Async
import cats.instances.list._
import cats.syntax.apply._
import cats.syntax.flatMap._
import com.smartbackpackerapp.model._
import com.smartbackpackerapp.scraper.config.ScraperConfiguration
import doobie.free.connection.ConnectionIO
import doobie.implicits._
import doobie.util.transactor.Transactor
import doobie.util.update.Update

class CountryInsertData[F[_]](scraperConfig: ScraperConfiguration[F],
                              xa : Transactor[F])
                             (implicit F: Async[F]) {

  private def insertCountriesBulk(countries: List[Country]): ConnectionIO[Int] = {
    CountryInsertStatement.insertCountries
      .updateMany(countries.map(c => (c.code.value, c.name.value, c.currency.value, false)))
  }

  private def updateCountriesCurrencyBulk(countries: List[Country]): ConnectionIO[Int] = {
    CountryInsertStatement.updateCountriesCurrency
      .updateMany(countries.map(c => (c.currency.value, c.code.value)))
  }

  private def updateSchengenCountriesBulk(countries: List[CountryCode]): ConnectionIO[Int] = {
    CountryInsertStatement.updateSchengenCountries
      .updateMany(countries.map(_.value))
  }

  private def runSchengenUpdate: F[Unit] = {
    scraperConfig.schengen() flatMap { countries =>
      updateSchengenCountriesBulk(countries).transact(xa) *> F.unit
    }
  }

  private def runCurrencyUpdate: F[Unit] = {
    scraperConfig.countries() flatMap { countries =>
      updateCountriesCurrencyBulk(countries).transact(xa) *> F.unit
    }
  }

  def runUpdate: F[Unit] = {
    runSchengenUpdate.flatMap(_ => runCurrencyUpdate)
  }

  def run: F[Unit] = {
    scraperConfig.countries() flatMap { countries =>
      insertCountriesBulk(countries).transact(xa) *> F.unit
    }
  }

}

object CountryInsertStatement {

  type CountryDTO       = (String, String, String, Boolean)
  type CurrencyQueryDTO = (String, String)

  val insertCountries: Update[CountryDTO] = {
    val sql = "INSERT INTO countries (code, name, currency, schengen) VALUES (?, ?, ?, ?)"
    Update[CountryDTO](sql)
  }

  val updateCountriesCurrency: Update[CurrencyQueryDTO] = {
    val sql = "UPDATE countries SET currency = ? WHERE code = ?"
    Update[CurrencyQueryDTO](sql)
  }

  val updateSchengenCountries: Update[String] = {
    val sql = "UPDATE countries SET schengen = 't' WHERE code = ?"
    Update[String](sql)
  }

} 
Example 15
Source File: VisaRestrictionsIndexInsertData.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.scraper.sql

import cats.effect.Async
import cats.instances.list._
import cats.syntax.apply._
import com.smartbackpackerapp.model._
import doobie.implicits._
import doobie.util.transactor.Transactor
import doobie.util.update.Update

class VisaRestrictionsIndexInsertData[F[_]](xa: Transactor[F])(implicit F: Async[F]) {

  private def insertVisaIndexBulk(list: List[(CountryCode, VisaRestrictionsIndex)]) = {
    VisaRestrictionsIndexInsertStatement.insertVisaIndex
      .updateMany(list.map { case (code, index) =>
        (code.value, index.rank.value, index.count.value, index.sharing.value)
      })
  }

  def run(list: List[(CountryCode, VisaRestrictionsIndex)]): F[Unit] = {
    // Netherlands is duplicated in ranking 2018
    insertVisaIndexBulk(list.toSet.toList).transact(xa) *> F.unit
  }

}

object VisaRestrictionsIndexInsertStatement {

  type CreateVisaIndexDTO = (String, Int, Int, Int)

  val insertVisaIndex: Update[CreateVisaIndexDTO] = {
    val sql = "INSERT INTO visa_restrictions_index (country_code, rank, acc, sharing) VALUES (?, ?, ?, ?)"
    Update[CreateVisaIndexDTO](sql)
  }

} 
Example 16
Source File: VisaRequirementsInsertData.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.scraper.sql

import java.sql.BatchUpdateException

import cats.effect.Async
import cats.instances.list._
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.smartbackpackerapp.common.Log
import com.smartbackpackerapp.model._
import com.smartbackpackerapp.scraper.model._
import com.smartbackpackerapp.scraper.parser.AbstractVisaRequirementsParser
import doobie.implicits._
import doobie.util.transactor.Transactor
import doobie.util.update.Update

class VisaRequirementsInsertData[F[_] : Async](xa: Transactor[F],
                                               visaRequirementsParser: AbstractVisaRequirementsParser[F])
                                              (implicit L: Log[F]) {

  private def insertVisaRequirementsBulk(list: List[VisaRequirementsFor]) = {
    VisaRequirementsInsertStatement.insertVisaRequirements
      .updateMany(list.map(_.toVisaRequirementsDTO))
  }

  // For example Algerian Wiki page has Burundi duplicated
  private val errorHandler: PartialFunction[Throwable, F[Unit]] = {
    case e: BatchUpdateException if e.getCause.getMessage.contains("duplicate key value") => L.error(e)
    case e: WikiPageNotFound => L.error(e)
  }

  def run(from: CountryCode): F[Unit] = {
    val program = for {
      _   <- L.info(s"${from.value} >> Gathering visa requirements from Wikipedia")
      req <- visaRequirementsParser.visaRequirementsFor(from)
      _   <- L.info(s"${from.value} >> Starting data insertion into DB")
      rs  <- insertVisaRequirementsBulk(req).transact(xa)
      _   <- L.info(s"${from.value} >> Created $rs records")
    } yield ()
    program.recoverWith(errorHandler)
  }

}

object VisaRequirementsInsertStatement {

  val insertVisaRequirements: Update[VisaRequirementsDTO] = {
    val sql =
      """
        |WITH from_view AS (
        |  SELECT id AS from_id FROM countries WHERE code = ?
        |),
        |to_view AS (
        |  SELECT id AS to_id FROM countries WHERE code = ?
        |),
        |visa_cat_view AS (
        |  SELECT id AS visa_id FROM visa_category WHERE name = ?
        |),
        |desc_view AS (
        |  SELECT ? AS description
        |)
        |INSERT INTO visa_requirements (from_country, to_country, visa_category, description)
        |SELECT from_id, to_id, visa_id, description FROM from_view, to_view, visa_cat_view, desc_view
      """.stripMargin
    Update[VisaRequirementsDTO](sql)
  }

} 
Example 17
Source File: VisaCategoryInsertData.scala    From core   with Apache License 2.0
package com.smartbackpackerapp.scraper.sql

import cats.effect.Async
import cats.instances.list._
import cats.syntax.apply._
import com.smartbackpackerapp.model._
import doobie.implicits._
import doobie.util.transactor.Transactor
import doobie.util.update.Update

import scala.reflect.runtime.{universe => ru}

class VisaCategoryInsertData[F[_]](xa : Transactor[F])(implicit F: Async[F]) {

  private def insertVisaCategoriesBulk(categories: List[String]) = {
    VisaCategoryInsertStatement.insertVisaCategories.updateMany(categories)
  }

  private def visaCategories: List[String] = {
    val tpe   = ru.typeOf[VisaCategory]
    val clazz = tpe.typeSymbol.asClass
    clazz.knownDirectSubclasses.map(_.name.toString).toList
  }

  def run: F[Unit] = {
    insertVisaCategoriesBulk(visaCategories).transact(xa) *> F.unit
  }

}

object VisaCategoryInsertStatement {

  val insertVisaCategories: Update[String] = {
    val sql = "INSERT INTO visa_category (name) VALUES (?)"
    Update[String](sql)
  }

} 
Example 18
Source File: ProjectAttributesCoordinator.scala    From nexus-kg   with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.async

import akka.actor.{ActorRef, ActorSystem}
import cats.effect.Async
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.kg.async.ProjectAttributesCoordinatorActor.Msg._
import ch.epfl.bluebrain.nexus.kg.cache.ProjectCache
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.kg.resources.{Files, OrganizationRef}
import ch.epfl.bluebrain.nexus.kg.storage.Storage.StorageOperations.FetchAttributes
import ch.epfl.bluebrain.nexus.sourcing.projections.Projections
import monix.eval.Task


// NOTE: the class declaration and its other members were elided in this
// listing; the header below is reconstructed from the companion object.
class ProjectAttributesCoordinator[F[_]](projectCache: ProjectCache[F], ref: ActorRef)(implicit F: Async[F]) {

  def stop(projectRef: ProjectRef): F[Unit] = {
    ref ! Stop(projectRef.id)
    F.unit
  }
}

object ProjectAttributesCoordinator {
  def apply(files: Files[Task], projectCache: ProjectCache[Task])(
      implicit config: AppConfig,
      fetchAttributes: FetchAttributes[Task],
      as: ActorSystem,
      P: Projections[Task, String]
  ): ProjectAttributesCoordinator[Task] = {
    val coordinatorRef = ProjectAttributesCoordinatorActor.start(files, None, config.cluster.shards)
    new ProjectAttributesCoordinator[Task](projectCache, coordinatorRef)
  }
} 
Example 19
Source File: CatsInterop.scala    From caliban   with Apache License 2.0
package caliban.interop.cats

import caliban.introspection.adt.__Type
import caliban.schema.Step.QueryStep
import caliban.schema.{ Schema, Step }
import caliban.{ CalibanError, GraphQL, GraphQLInterpreter, GraphQLResponse, InputValue }
import cats.effect.implicits._
import cats.effect.{ Async, Effect }
import zio.interop.catz._
import zio.{ Runtime, _ }
import zio.query.ZQuery

object CatsInterop {

  def executeAsync[F[_]: Async, R, E](graphQL: GraphQLInterpreter[R, E])(
    query: String,
    operationName: Option[String] = None,
    variables: Map[String, InputValue] = Map(),
    extensions: Map[String, InputValue] = Map(),
    skipValidation: Boolean = false,
    enableIntrospection: Boolean = true
  )(implicit runtime: Runtime[R]): F[GraphQLResponse[E]] =
    Async[F].async { cb =>
      val execution = graphQL.execute(
        query,
        operationName,
        variables,
        extensions,
        skipValidation = skipValidation,
        enableIntrospection = enableIntrospection
      )

      runtime.unsafeRunAsync(execution)(exit => cb(exit.toEither))
    }

  def checkAsync[F[_]: Async, R](
    graphQL: GraphQLInterpreter[R, Any]
  )(query: String)(implicit runtime: Runtime[R]): F[Unit] =
    Async[F].async(cb => runtime.unsafeRunAsync(graphQL.check(query))(exit => cb(exit.toEither)))

  def interpreterAsync[F[_]: Async, R](
    graphQL: GraphQL[R]
  )(implicit runtime: Runtime[R]): F[GraphQLInterpreter[R, CalibanError]] =
    Async[F].async(cb => runtime.unsafeRunAsync(graphQL.interpreter)(exit => cb(exit.toEither)))

  def schema[F[_]: Effect, R, A](implicit ev: Schema[R, A]): Schema[R, F[A]] =
    new Schema[R, F[A]] {
      override def toType(isInput: Boolean, isSubscription: Boolean): __Type =
        ev.toType(isInput, isSubscription)

      override def optional: Boolean =
        ev.optional

      override def resolve(value: F[A]): Step[R] =
        QueryStep(ZQuery.fromEffect(value.toIO.to[Task].map(ev.resolve)))
    }
} 
Example 20
Source File: package.scala    From caliban   with Apache License 2.0
package caliban.interop.cats

import caliban.schema.Schema
import caliban.{ CalibanError, GraphQL, GraphQLInterpreter, GraphQLResponse, InputValue }
import cats.effect.{ Async, Effect }
import zio.Runtime

package object implicits {

  implicit class CatsEffectGraphQLInterpreter[R, E](underlying: GraphQLInterpreter[R, E]) {

    def executeAsync[F[_]: Async](
      query: String,
      operationName: Option[String] = None,
      variables: Map[String, InputValue] = Map(),
      extensions: Map[String, InputValue] = Map(),
      skipValidation: Boolean = false,
      enableIntrospection: Boolean = true
    )(implicit runtime: Runtime[R]): F[GraphQLResponse[E]] =
      CatsInterop.executeAsync(underlying)(
        query,
        operationName,
        variables,
        extensions,
        skipValidation = skipValidation,
        enableIntrospection = enableIntrospection
      )

    def checkAsync[F[_]: Async](query: String)(implicit runtime: Runtime[R]): F[Unit] =
      CatsInterop.checkAsync(underlying)(query)
  }

  implicit class CatsEffectGraphQL[R, E](underlying: GraphQL[R]) {

    def interpreterAsync[F[_]: Async](implicit runtime: Runtime[R]): F[GraphQLInterpreter[R, CalibanError]] =
      CatsInterop.interpreterAsync(underlying)
  }

  implicit def effectSchema[F[_]: Effect, R, A](implicit ev: Schema[R, A]): Schema[R, F[A]] =
    CatsInterop.schema
} 
Example 21
Source File: MySqlInvoiceList.scala    From event-sourcing-kafka-streams   with MIT License
package org.amitayh.invoices.dao

import cats.Monad
import cats.effect.{Async, ContextShift, Resource}
import cats.syntax.functor._
import doobie.free.connection.ConnectionIO
import doobie.hikari.HikariTransactor
import doobie.implicits._
import doobie.util.ExecutionContexts
import doobie.util.transactor.Transactor

class MySqlInvoiceList[F[_]: Monad](transactor: Transactor[F]) extends InvoiceList[F] {
  override def save(record: InvoiceRecord): F[Unit] =
    MySqlInvoiceList.save(record).transact(transactor)

  override def get: F[List[InvoiceRecord]] =
    MySqlInvoiceList.get.transact(transactor)
}

object MySqlInvoiceList {
  def save(record: InvoiceRecord): ConnectionIO[Unit] = {
    import record._
    val sql = sql"""
      INSERT INTO invoices (id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status)
      VALUES ($id, $version, $updatedAt, $customerName, $customerEmail, $issueDate, $dueDate, $total, $status)
      ON DUPLICATE KEY UPDATE
        version = VALUES(version),
        updated_at = VALUES(updated_at),
        customer_name = VALUES(customer_name),
        customer_email = VALUES(customer_email),
        issue_date = VALUES(issue_date),
        due_date = VALUES(due_date),
        total = VALUES(total),
        status = VALUES(status)
    """
    sql.update.run.void
  }

  def get: ConnectionIO[List[InvoiceRecord]] = {
    val sql = sql"""
      SELECT id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status
      FROM invoices
      WHERE status IN ('New', 'Paid')
      ORDER BY created_at DESC
    """
    sql.query[InvoiceRecord].to[List]
  }

  def resource[F[_]: Async: ContextShift]: Resource[F, MySqlInvoiceList[F]] = for {
    connectEC <- ExecutionContexts.fixedThreadPool[F](32)
    transactEC <- ExecutionContexts.cachedThreadPool[F]
    transactor <- HikariTransactor.newHikariTransactor[F](
      driverClassName = sys.env("DB_DRIVER"),
      url = sys.env("DB_URL"),
      user = sys.env("DB_USER"),
      pass = sys.env("DB_PASS"),
      connectEC = connectEC,
      transactEC = transactEC)
  } yield new MySqlInvoiceList[F](transactor)
} 
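A usage sketch: acquire the list as a Resource and read the current records (the DB_* environment variables are assumed to be set):

import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// `use` guarantees both thread pools and the connection pool are shut down.
val records: IO[List[InvoiceRecord]] =
  MySqlInvoiceList.resource[IO].use(_.get)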
Example 22
Source File: DoobieHikariModule.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  
  // Builds a HikariCP-backed doobie HikariTransactor from the given configuration.
  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()
      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)
      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)

      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)
      c
    }
  }

} 
Example 23
Source File: bulk.scala    From fs2-elastic   with MIT License
package com.alessandromarrella.fs2_elastic.syntax

import cats.effect.Async
import fs2.Stream
import org.elasticsearch.action.bulk.{BulkItemResponse, BulkRequest, BulkResponse}
import org.elasticsearch.client.RestHighLevelClient
import com.alessandromarrella.fs2_elastic.io
import org.apache.http.Header

private[syntax] trait bulk {
  implicit class ElasticClientBulkOps[F[_]](
      val client: Stream[F, RestHighLevelClient]) {
    def bulk(bulkRequest: BulkRequest, headers: Header*): Stream[F, BulkResponse] =
      client.through(io.bulk.bulk(bulkRequest, headers:_*))
  }
  implicit class BulkOps[F[_]](
      val bulkResponseStream: Stream[F, BulkResponse]) {
    def stream(implicit F: Async[F]): Stream[F, BulkItemResponse] =
      streamFromJavaIterable(bulkResponseStream)
  }

}

object bulk extends bulk 
Example 24
Source File: JsMessageBuilder.scala    From iotchain   with MIT License
package jbok.network.http

import java.nio.ByteBuffer

import cats.effect.{Async, IO}
import org.scalajs.dom.{Blob, Event, FileReader, UIEvent}

import scala.concurrent.Promise
import scala.scalajs.js.typedarray.TypedArrayBufferOps._
import scala.scalajs.js.typedarray._
import scala.scalajs.js.|

trait JsMessageBuilder[F[_], P] {
  import JsMessageBuilder._

  def responseType: String

  def pack(payload: P): Message

  def unpack(msg: Message): F[Option[P]]
}

object JsMessageBuilder {
  type Message = String | ArrayBuffer | Blob

  implicit def JsMessageBuilderString[F[_]](implicit F: Async[F]): JsMessageBuilder[F, String] = new JsMessageBuilder[F, String] {
    val responseType = ""

    def pack(payload: String): Message = payload

    def unpack(msg: Message): F[Option[String]] = (msg: Any) match {
      case s: String => F.pure(Some(s))
      case b: Blob   => readBlob[F, String, String](_.readAsText(b))(identity)
      case _         => F.pure(None)
    }
  }

  implicit def JsMessageBuilderByteBuffer[F[_]](implicit F: Async[F]): JsMessageBuilder[F, ByteBuffer] = new JsMessageBuilder[F, ByteBuffer] {
    val responseType = "arraybuffer"

    def pack(payload: ByteBuffer): Message = payload.arrayBuffer.slice(payload.position, payload.limit)

    def unpack(msg: Message): F[Option[ByteBuffer]] = (msg: Any) match {
      case a: ArrayBuffer => F.pure(Option(TypedArrayBuffer.wrap(a)))
      case b: Blob        => readBlob[F, ArrayBuffer, ByteBuffer](_.readAsArrayBuffer(b))(TypedArrayBuffer.wrap(_))
      case _              => F.pure(None)
    }
  }

  private def readBlob[F[_], R, W](doRead: FileReader => Unit)(conv: R => W)(implicit F: Async[F]): F[Option[W]] = {
    val promise = Promise[Option[W]]()
    val reader  = new FileReader
    reader.onload = (_: UIEvent) => {
      val s = reader.result.asInstanceOf[R]
      promise.success(Option(conv(s)))
    }
    reader.onerror = (_: Event) => {
      promise.success(None)
    }
    doRead(reader)
    F.liftIO(IO.fromFuture(IO(promise.future)))
  }
} 
Example 25
Source File: HttpTransport.scala    From iotchain   with MIT License
package jbok.network.http

import cats.effect.Async
import cats.implicits._
import io.circe.Json
import jbok.network.rpc._
import io.circe.parser._

object HttpTransport {
  def apply[F[_]](baseUri: String)(implicit F: Async[F]): RpcTransport[F, Json] =
    new RpcTransport[F, Json] {
      override def fetch(request: RpcRequest[Json]): F[RpcResponse[Json]] = {
        val uri = (baseUri :: request.path).mkString("/")
        for {
          response <- HttpClient.post[F](uri, request.payload.noSpaces)
          resp     <- F.fromEither(decode[RpcResponse[Json]](response.data))
        } yield resp
      }
    }
} 
Example 26
Source File: Doobie.scala    From iotchain   with MIT License
package jbok.app.service.store.doobie

import cats.effect.{Async, ContextShift, Resource}
import doobie._
import doobie.hikari.HikariTransactor
import jbok.core.config.DatabaseConfig

object Doobie {
  def xa[F[_]](config: DatabaseConfig)(implicit F: Async[F], cs: ContextShift[F]): Resource[F, Transactor[F]] =
    for {
      ce <- ExecutionContexts.fixedThreadPool[F](32) // our connect EC
      te <- ExecutionContexts.cachedThreadPool[F]    // our transaction EC
      xa <- HikariTransactor.newHikariTransactor[F](
        config.driver,
        config.url,
        config.user,     // username
        config.password, // password
        ce,              // await connection here
        te               // execute JDBC operations here
      )
    } yield xa
} 
Example 27
Source File: DoobieCheckSpec.scala    From sup   with Apache License 2.0
package sup

import _root_.doobie.Transactor
import cats.effect.Async
import cats.effect.ContextShift
import cats.effect.IO
import cats.effect.Timer
import scala.concurrent.duration._
import cats.implicits._
import scala.concurrent.ExecutionContext

class DoobieCheckSpec extends BaseIOTest {

  def goodTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbc:h2:mem:")

  def badTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbcfoobarnope")

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  "IO H2 check" when {
    "the database responds before the timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = 5.seconds.some)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }

    "there is no timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = none)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }
  }
} 
Example 28
Source File: KafkaAdminAlgebra.scala    From hydra   with Apache License 2.0
package hydra.kafka.algebras

import cats.effect.concurrent.Ref
import cats.effect.{Async, Concurrent, ContextShift, Resource, Sync}
import cats.implicits._
import fs2.kafka._
import hydra.core.protocol._
import hydra.kafka.util.KafkaUtils.TopicDetails
import org.apache.kafka.clients.admin.NewTopic
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException

import scala.util.control.NoStackTrace


// NOTE: the trait declaration and its other members were elided in this
// listing; they are reconstructed here from the implementations below.
trait KafkaAdminAlgebra[F[_]] {
  import KafkaAdminAlgebra._

  def describeTopic(name: TopicName): F[Option[Topic]]
  def getTopicNames: F[List[TopicName]]
  def createTopic(name: TopicName, details: TopicDetails): F[Unit]
  def deleteTopic(name: String): F[Unit]
}

object KafkaAdminAlgebra {

  type TopicName = String
  final case class Topic(name: TopicName, numberPartitions: Int)

  def live[F[_]: Sync: Concurrent: ContextShift](
      bootstrapServers: String,
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {

      override def describeTopic(name: TopicName): F[Option[Topic]] = {
        getAdminClientResource
          .use(_.describeTopics(name :: Nil))
          .map(_.headOption.map(_._2).map { td =>
            Topic(td.name(), td.partitions().size())
          })
          .recover {
            case _: UnknownTopicOrPartitionException => None
          }
      }

      override def getTopicNames: F[List[TopicName]] =
        getAdminClientResource.use(_.listTopics.names.map(_.toList))

      override def createTopic(name: TopicName, d: TopicDetails): F[Unit] = {
        import scala.collection.JavaConverters._
        val newTopic = new NewTopic(name, d.numPartitions, d.replicationFactor)
          .configs(d.configs.asJava)
        getAdminClientResource.use(_.createTopic(newTopic))
      }

      override def deleteTopic(name: String): F[Unit] =
        getAdminClientResource.use(_.deleteTopic(name))

      private def getAdminClientResource: Resource[F, KafkaAdminClient[F]] = {
        adminClientResource(
          AdminClientSettings.apply.withBootstrapServers(bootstrapServers)
        )
      }
    }
  }

  def test[F[_]: Sync]: F[KafkaAdminAlgebra[F]] =
    Ref[F].of(Map[TopicName, Topic]()).flatMap(getTestKafkaClient[F])

  private[this] def getTestKafkaClient[F[_]: Sync](
      ref: Ref[F, Map[TopicName, Topic]]
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {
      override def describeTopic(name: TopicName): F[Option[Topic]] =
        ref.get.map(_.get(name))

      override def getTopicNames: F[List[TopicName]] =
        ref.get.map(_.keys.toList)

      override def createTopic(
          name: TopicName,
          details: TopicDetails
      ): F[Unit] = {
        val entry = name -> Topic(name, details.numPartitions)
        ref.update(old => old + entry)
      }

      override def deleteTopic(name: String): F[Unit] =
        ref.update(_ - name)
    }
  }

} 
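A usage sketch against the in-memory test implementation, which needs no running Kafka cluster:

import cats.effect.IO

// State lives in a Ref, so this is safe to run anywhere.
val names: IO[List[String]] =
  KafkaAdminAlgebra.test[IO].flatMap(_.getTopicNames)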
Example 29
Source File: TaskInstancesSpecs.scala    From shims   with Apache License 2.0
package shims.effect

import cats.Eq
import cats.laws.discipline.{ApplicativeTests, ParallelTests}

import cats.effect.{Async, IO}
import cats.effect.laws.discipline.{arbitrary, EffectTests}, arbitrary._
import cats.effect.laws.util.{TestContext, TestInstances}, TestInstances._

import cats.instances.either._
import cats.instances.int._
import cats.instances.tuple._
import cats.instances.unit._

import scalaz.Tag
import scalaz.concurrent.Task
import scalaz.concurrent.Task.ParallelTask

import org.specs2.Specification
import org.specs2.scalacheck.Parameters
import org.specs2.specification.core.Fragments

import org.typelevel.discipline.Laws
import org.typelevel.discipline.specs2.Discipline

import java.util.concurrent.RejectedExecutionException

import scala.concurrent.ExecutionContext

object TaskInstancesSpecs extends Specification with Discipline {
  import TaskArbitrary._
  import Task.taskParallelApplicativeInstance

  def is = br ^ taskEff ^ br ^ taskPar ^ br ^ parTaskApp ^ br ^ asyncShiftTask

  def taskEff = checkAllAsync("Effect[Task]",
    implicit ctx => EffectTests[Task].effect[Int, Int, Int])

  def taskPar = checkAllAsync("Parallel[Task]",
    implicit ctx => ParallelTests[Task].parallel[Int, Int])

  def parTaskApp = checkAllAsync("Parallel[Task]", { implicit ctx =>
    val tests = ApplicativeTests[ParallelTask]
    tests.applicative[Int, Int, Int]
  })

  def asyncShiftTask = {
    implicit val context: TestContext = TestContext()
    val boom = new RejectedExecutionException("Boom")
    val rejectingEc = new ExecutionContext {
      def execute(runnable: Runnable): Unit = throw boom
      def reportFailure(cause: Throwable): Unit = ()
    }

    "async.shift on rejecting execution context" ! {
      Eq[Task[Unit]].eqv(Async.shift[Task](rejectingEc), Task.fail(boom)) must beTrue
    }
  }

  def checkAllAsync(name: String, f: TestContext => Laws#RuleSet)(implicit p: Parameters) = {
    val context = TestContext()
    val ruleSet = f(context)

    Fragments.foreach(ruleSet.all.properties.toList) {
      case (id, prop) =>
        id ! check(prop, p, defaultFreqMapPretty) ^ br
    }
  }

  implicit def taskEq[A: Eq](implicit ctx: TestContext): Eq[Task[A]] =
    Eq.by(ta => IO.async[A](k => ta.unsafePerformAsync(e => k(e.toEither))))

  implicit def parallelTaskEq[A: Eq](implicit ctx: TestContext): Eq[ParallelTask[A]] =
    Tag.subst(taskEq[A])
} 
Example 30
Source File: AjaxClient.scala    From canoe   with MIT License
package canoe.api.clients

import canoe.api.{FailedMethod, ResponseDecodingError, TelegramClient}
import canoe.methods.Method
import canoe.models.Response
import cats.effect.{Async, ContextShift}
import cats.syntax.all._
import io.circe.Decoder
import io.circe.parser.decode
import org.scalajs.dom.console
import org.scalajs.dom.ext.Ajax

private[api] class AjaxClient[F[_]: Async: ContextShift](token: String) extends TelegramClient[F] {

  private val botApiUri: String = s"https://api.telegram.org/bot$token"

  
  // Encodes the request with the method's encoder, posts it to the Bot API,
  // and decodes the Telegram response.
  def execute[Req, Res](request: Req)(implicit M: Method[Req, Res]): F[Res] = {
    implicit val responseDecoder: Decoder[Response[Res]] = Response.decoder[Res](M.decoder)

    sendJsonRequest(request, M).map(decode[Response[Res]]).flatMap {
      case Left(error)     => handleUnknownEntity(M.name, request, error.getMessage)
      case Right(response) => handleTelegramResponse(M, request)(response)
    }
  }

  private def handleUnknownEntity[I, A](method: String, input: I, error: String): F[A] = {
    console.error(
      s"Received unknown Telegram entity during execution of '$method' method. \nInput data: $input. \n${error}"
    )
    ResponseDecodingError(error.toString).raiseError[F, A]
  }

  private def handleTelegramResponse[A, I, C](m: Method[I, A], input: I)(response: Response[A]): F[A] =
    response match {
      case Response(true, Some(result), _, _, _) => result.pure[F]

      case failed =>
        console.error(s"Received failed response from Telegram: $failed. Method name: ${m.name}, input data: $input")
        FailedMethod(m, input, failed).raiseError[F, A]
    }

  private def sendJsonRequest[Req, Res](request: Req, method: Method[Req, Res]): F[String] = {
    val url = s"$botApiUri/${method.name}"
    val json = method.encoder.apply(request).toString

    Async
      .fromFuture(Async[F].delay(Ajax.post(url, json, headers = Map("Content-Type" -> "application/json"))))
      .map(_.responseText)
  }
} 
Example 31
Source File: Interpreter.scala    From hammock   with MIT License
package hammock
package js

import cats._
import cats.effect.{Async, ContextShift}
import cats.syntax.applicative._
import cats.syntax.flatMap._
import cats.syntax.functor._
import cats.syntax.show._
import org.scalajs.dom.ext.Ajax
import org.scalajs.dom.ext.Ajax.InputData
import java.nio.ByteBuffer

object Interpreter {

  def apply[F[_]](implicit F: InterpTrans[F]): InterpTrans[F] = F

  implicit def instance[F[_]: Async: ContextShift]: InterpTrans[F] = new InterpTrans[F] {
    def trans: HttpF ~> F = {

      def doReq(reqF: HttpF[HttpResponse]): F[HttpResponse] = {
        val timeout = 0
        val headers = reqF.req.headers
        val data: InputData = reqF.req.entity.fold(InputData.str2ajax(""))(
          _.cata(
            string => InputData.str2ajax(string.content),
            bytes => InputData.byteBuffer2ajax(ByteBuffer.wrap(bytes.content)),
            Function.const(InputData.str2ajax("")))
        )
        val method = toMethod(reqF)

        for {
          response <- Async.fromFuture(
            Async[F].delay(Ajax(method.name, reqF.req.uri.show, data, timeout, headers, false, "")))
          responseHeaders <- parseHeaders(response.getAllResponseHeaders)
          status = Status.get(response.status)
          body   = response.responseText
        } yield HttpResponse(status, responseHeaders, Entity.StringEntity(body))
      }

      def toMethod(reqF: HttpF[HttpResponse]): Method = reqF match {
        case Options(_) => Method.OPTIONS
        case Get(_)     => Method.GET
        case Head(_)    => Method.HEAD
        case Post(_)    => Method.POST
        case Put(_)     => Method.PUT
        case Delete(_)  => Method.DELETE
        case Trace(_)   => Method.TRACE
        case Patch(_)   => Method.PATCH
      }

      def parseHeaders(str: String): F[Map[String, String]] = str match {
        case null => Map.empty[String, String].pure[F]
        case string =>
          Async[F].delay(
            string
              .split("\r\n")
              .map({ line =>
                val splitted = line.split(": ")
                // Re-join with ": " so header values that themselves contain ": " survive.
                (splitted.head, splitted.tail.mkString(": ").trim)
              })
              .toMap)
      }

      λ[HttpF ~> F] {
        case req @ (Options(_) | Get(_) | Head(_) | Post(_) | Put(_) | Delete(_) | Trace(_) | Patch(_)) => doReq(req)
      }
    }
  }
} 
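A usage sketch for the interpreter (hedged: the Hammock.request(...).exec[F] entry point follows hammock's README and may differ across versions):

import cats.effect.{ContextShift, IO}
import hammock._
import hammock.js.Interpreter

import scala.concurrent.ExecutionContext

object JsInterpreterUsage {
  implicit val cs: ContextShift[IO]    = IO.contextShift(ExecutionContext.global)
  implicit val interp: InterpTrans[IO] = Interpreter.instance[IO]

  // Describes a GET as a free program, then runs it through the Ajax interpreter.
  val response: IO[HttpResponse] =
    Hammock.request(Method.GET, uri"https://example.com", Map()).exec[IO]
}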
Example 32
Source File: HttpExistenceClient.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.util

import cats.effect.{Async, Resource}
import cats.implicits._
import com.github.benmanes.caffeine.cache.Caffeine
import io.chrisdavenport.log4cats.Logger
import org.http4s.client.Client
import org.http4s.{Method, Request, Status, Uri}
import org.scalasteward.core.application.Config
import scalacache.CatsEffect.modes._
import scalacache.caffeine.CaffeineCache
import scalacache.{Async => _, _}

final class HttpExistenceClient[F[_]](statusCache: Cache[Status])(implicit
    client: Client[F],
    logger: Logger[F],
    mode: Mode[F],
    F: MonadThrowable[F]
) {
  def exists(uri: Uri): F[Boolean] =
    status(uri).map(_ === Status.Ok).handleErrorWith { throwable =>
      logger.debug(throwable)(s"Failed to check if $uri exists").as(false)
    }

  private def status(uri: Uri): F[Status] =
    statusCache.cachingForMemoizeF(uri.renderString)(None) {
      client.status(Request[F](method = Method.HEAD, uri = uri))
    }
}

object HttpExistenceClient {
  def create[F[_]](implicit
      config: Config,
      client: Client[F],
      logger: Logger[F],
      F: Async[F]
  ): Resource[F, HttpExistenceClient[F]] = {
    val buildCache = F.delay {
      CaffeineCache(
        Caffeine
          .newBuilder()
          .maximumSize(16384L)
          .expireAfterWrite(config.cacheTtl.length, config.cacheTtl.unit)
          .build[String, Entry[Status]]()
      )
    }
    Resource.make(buildCache)(_.close().void).map(new HttpExistenceClient[F](_))
  }
} 
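A wiring sketch (the config, client, and logger instances are hypothetical stand-ins for scala-steward's own application wiring):

import cats.effect.IO
import io.chrisdavenport.log4cats.Logger
import org.http4s.Uri
import org.http4s.client.Client
import org.scalasteward.core.application.Config

object HttpExistenceClientUsage {
  // config, client, and logger are hypothetical stand-ins for scala-steward's wiring.
  def check(implicit config: Config, client: Client[IO], logger: Logger[IO]): IO[Boolean] =
    HttpExistenceClient.create[IO].use(_.exists(Uri.unsafeFromString("https://github.com")))
}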
Example 33
Source File: HttpClient.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.network.http

import cats.effect.{Async, IO}
import jbok.network.facade.{Axios, Config, Response}

import scala.scalajs.js

object HttpClient {
  def request[F[_]](config: Config)(implicit F: Async[F]): F[Response] =
    F.liftIO(IO.fromFuture(IO(Axios.request(config).toFuture)))

  def get[F[_]](url: String)(implicit F: Async[F]): F[Response] =
    request[F](new Config(url))

  def post[F[_]](url: String, _data: String)(implicit F: Async[F]): F[Response] =
    request[F](new Config(url) {
      override val method: String = "post"
      override val data: js.Any   = _data
    })
} 
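Usage is a one-liner per verb; a sketch with placeholder URLs:

import cats.effect.IO
import jbok.network.facade.Response

object HttpClientUsage {
  // Both verbs return the raw Axios Response wrapped in the effect.
  val fetched: IO[Response] = HttpClient.get[IO]("https://example.com/status")
  val posted: IO[Response]  = HttpClient.post[IO]("https://example.com/rpc", """{"id":1}""")
}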
Example 34
Source File: search.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic.syntax

import cats.effect.Async
import fs2.Stream
import org.elasticsearch.action.search.{SearchRequest, SearchResponse, SearchScrollRequest}
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.{SearchHit, SearchHits}

import scala.concurrent.duration.Duration
import com.alessandromarrella.fs2_elastic.io
import org.apache.http.Header

private[syntax] trait search {

  type SearchResultMaybe[A] =
    Option[(A, (RestHighLevelClient, SearchScrollRequest, SearchResponse))]

  implicit class ElasticClientSearchOps[F[_]](
      val client: Stream[F, RestHighLevelClient]) {

    def search(searchRequest: SearchRequest, headers: Header*): Stream[F, SearchResponse] =
      client.through(io.search.search(searchRequest, headers: _*))

    def searchScroll(searchRequest: SearchRequest, duration: Duration, headers: Header*)(
        implicit F: Async[F])
      : Stream[F, (RestHighLevelClient, SearchResponse)] =
      client.through(io.search.searchScroll(searchRequest, duration, headers: _*))
  }

  implicit class SearchResponseOps[F[_]](
      val searchResponse: Stream[F, SearchResponse]) {
    def hits: Stream[F, SearchHits] =
      searchResponse.through(io.search.hits[F])
  }

  implicit class SearchScrollResponseOps[F[_]](
      val searchResponse: Stream[F, (RestHighLevelClient, SearchResponse)]) {
    def hitsScroll(implicit F: Async[F]): Stream[F, SearchHits] =
      searchResponse.through(io.search.hitsScroll[F])
  }

  implicit class SearchHitOps[F[_]](val hitsStream: Stream[F, SearchHits]) {
    def stream(implicit F: Async[F]): Stream[F, SearchHit] =
      streamFromJavaIterable(hitsStream)
  }

}

object search extends search 
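The syntax composes into a single pipeline; a sketch assuming a clients stream that wraps an already-built RestHighLevelClient and a hypothetical "articles" index:

import cats.effect.Async
import com.alessandromarrella.fs2_elastic.syntax.search._
import fs2.Stream
import org.elasticsearch.action.search.SearchRequest
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.SearchHit

object SearchSyntaxUsage {
  // Runs a query and flattens the response into individual hits.
  def allHits[F[_]: Async](clients: Stream[F, RestHighLevelClient]): Stream[F, SearchHit] =
    clients.search(new SearchRequest("articles")).hits.stream
}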
Example 35
Source File: package.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic

import cats.effect.Async
import fs2.Stream

import org.elasticsearch.common.unit.TimeValue

import scala.concurrent.duration.Duration
import scala.collection.JavaConverters._

package object syntax {

  private[syntax] type IteratorResultMaybe[A] = Option[(A, Iterator[A])]

  private[syntax] def streamFromJavaIterable[F[_], A](
      inputStream: Stream[F, java.lang.Iterable[A]])(
      implicit F: Async[F]): Stream[F, A] =
    streamFromIterable(inputStream.map(_.asScala))

  private[syntax] def streamFromIterable[F[_], A](
      inputStream: Stream[F, Iterable[A]])(implicit F: Async[F]): Stream[F, A] =
    inputStream.flatMap(a =>
      Stream.unfoldEval(a.iterator) { i =>
        if (i.hasNext) F.delay[IteratorResultMaybe[A]](Some((i.next(), i)))
        else F.delay[IteratorResultMaybe[A]](Option.empty)
    })

  private[syntax] def durationToTimeValue(duration: Duration): TimeValue =
    TimeValue.timeValueNanos(duration.toNanos)

} 
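The unfoldEval idiom above is worth seeing in isolation; a self-contained sketch of the same pattern (the private[syntax] restriction is dropped for illustration):

import cats.effect.Async
import fs2.Stream

object UnfoldIdiom {
  // Same pattern as streamFromIterable: advance the iterator inside F.delay so
  // that each step of the iteration is deferred into the effect.
  def fromIterable[F[_], A](as: Iterable[A])(implicit F: Async[F]): Stream[F, A] =
    Stream.unfoldEval(as.iterator) { i =>
      if (i.hasNext) F.delay[Option[(A, Iterator[A])]](Some((i.next(), i)))
      else F.delay[Option[(A, Iterator[A])]](None)
    }
  // fromIterable[cats.effect.IO, Int](List(1, 2, 3)).compile.toList.unsafeRunSync()
  // yields List(1, 2, 3).
}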
Example 36
Source File: search.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic.io

import cats.effect.Async
import fs2.{Pipe, Stream}
import org.apache.http.Header
import org.elasticsearch.action.search.{SearchRequest, SearchResponse, SearchScrollRequest}
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.SearchHits

import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration

private[io] trait search {

  type SearchResultMaybe[A] = Option[(A, SearchResponse)]

  def search[F[_]](searchRequest: SearchRequest, headers: Header*)
    : Pipe[F, RestHighLevelClient, SearchResponse] =
    client => client.map(_.search(searchRequest, headers: _*))

  def searchScroll[F[_]](searchRequest: SearchRequest, duration: Duration, headers: Header*)(
      implicit F: Async[F])
    : Pipe[F, RestHighLevelClient, (RestHighLevelClient, SearchResponse)] =
    client =>
      client.map(c =>
        (c, c.search(searchRequest.scroll(durationToTimeValue(duration)), headers: _*)))

  def hits[F[_]]: Pipe[F, SearchResponse, SearchHits] =
    response => response.map(_.getHits)

  def hitsScroll[F[_]](implicit F: Async[F])
    : Pipe[F, (RestHighLevelClient, SearchResponse), SearchHits] =
    input =>
      input.flatMap {
        case (client, response) =>
          Stream.unfoldEval(response) { res =>
            F.delay[SearchResultMaybe[SearchHits]](
              Option[SearchHits](res.getHits).flatMap {
                case hits if hits.asScala.nonEmpty =>
                  val newRequest  = new SearchScrollRequest(res.getScrollId)
                  // The scroll call returns the next page's response, not hits.
                  val newResponse = client.searchScroll(newRequest)
                  Some((hits, newResponse))
                case _ =>
                  None
              })
          }
    }

}

object search extends search 
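A sketch composing the scroll pipes directly (the one-minute keep-alive and the "articles" index are arbitrary):

import cats.effect.Async
import com.alessandromarrella.fs2_elastic.io.search
import fs2.Stream
import org.elasticsearch.action.search.SearchRequest
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.SearchHits

import scala.concurrent.duration._

object ScrollUsage {
  // Pages through the whole scroll, emitting one SearchHits per page.
  def scrollAll[F[_]: Async](clients: Stream[F, RestHighLevelClient]): Stream[F, SearchHits] =
    clients
      .through(search.searchScroll[F](new SearchRequest("articles"), 1.minute))
      .through(search.hitsScroll[F])
}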
Example 37
Source File: StateMachineFixture.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing

import cats.effect.{Async, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.sourcing.Command._
import ch.epfl.bluebrain.nexus.sourcing.Rejection.InvalidRevision
import ch.epfl.bluebrain.nexus.sourcing.State.{Current, Initial}

object StateMachineFixture {

  val initialState: State = State.Initial

  def evaluate[F[_]](state: State, cmd: Command)(implicit F: Async[F], T: Timer[F]): F[Either[Rejection, State]] =
    (state, cmd) match {
      case (Current(revS, _), Boom(revC, message)) if revS == revC                      => F.raiseError(new RuntimeException(message))
      case (Initial, Boom(rev, message)) if rev == 0                                    => F.raiseError(new RuntimeException(message))
      case (_, Boom(rev, _))                                                            => F.pure(Left(InvalidRevision(rev)))
      case (Current(revS, _), Never(revC)) if revS == revC                              => F.never
      case (Initial, Never(rev)) if rev == 0                                            => F.never
      case (_, Never(rev))                                                              => F.pure(Left(InvalidRevision(rev)))
      case (Initial, Increment(rev, step)) if rev == 0                                  => F.pure(Right(State.Current(1, step)))
      case (Initial, Increment(rev, _))                                                 => F.pure(Left(InvalidRevision(rev)))
      case (Initial, IncrementAsync(rev, step, duration)) if rev == 0                   =>
        T.sleep(duration) >> F.pure(Right(State.Current(1, step)))
      case (Initial, IncrementAsync(rev, _, _))                                         => F.pure(Left(InvalidRevision(rev)))
      case (Initial, Initialize(rev)) if rev == 0                                       => F.pure(Right(State.Current(1, 0)))
      case (Initial, Initialize(rev))                                                   => F.pure(Left(InvalidRevision(rev)))
      case (Current(revS, value), Increment(revC, step)) if revS == revC                =>
        F.pure(Right(State.Current(revS + 1, value + step)))
      case (Current(_, _), Increment(revC, _))                                          => F.pure(Left(InvalidRevision(revC)))
      case (Current(revS, value), IncrementAsync(revC, step, duration)) if revS == revC =>
        T.sleep(duration) >> F.pure(Right(State.Current(revS + 1, value + step)))
      case (Current(_, _), IncrementAsync(revC, _, duration))                           =>
        T.sleep(duration) >> F.pure(Left(InvalidRevision(revC)))
      case (Current(revS, _), Initialize(revC)) if revS == revC                         => F.pure(Right(State.Current(revS + 1, 0)))
      case (Current(_, _), Initialize(rev))                                             => F.pure(Left(InvalidRevision(rev)))
    }

} 
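Driving evaluate with IO needs only a Timer for the async cases; a sketch (expected unsafeRunSync results shown in comments):

import cats.effect.{IO, Timer}
import ch.epfl.bluebrain.nexus.sourcing.Command.Increment
import ch.epfl.bluebrain.nexus.sourcing.{Rejection, State}

import scala.concurrent.ExecutionContext

object StateMachineFixtureUsage {
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  // A valid first command advances the state; a stale revision is rejected in Left.
  val ok: IO[Either[Rejection, State]]       = StateMachineFixture.evaluate[IO](State.Initial, Increment(0, 2))
  val rejected: IO[Either[Rejection, State]] = StateMachineFixture.evaluate[IO](State.Initial, Increment(5, 2))
  // ok.unsafeRunSync()       == Right(State.Current(1, 2))
  // rejected.unsafeRunSync() == Left(Rejection.InvalidRevision(5))
}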
Example 38
Source File: AggregateFixture.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing

import cats.effect.{Async, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.sourcing.Command._
import ch.epfl.bluebrain.nexus.sourcing.Event.{Incremented, Initialized}
import ch.epfl.bluebrain.nexus.sourcing.Rejection.InvalidRevision
import ch.epfl.bluebrain.nexus.sourcing.State.{Current, Initial}

object AggregateFixture {

  val initialState: State = State.Initial

  val next: (State, Event) => State = {
    case (Initial, Incremented(1, step))             => State.Current(1, step)
    case (Initial, Initialized(rev))                 => State.Current(rev, 0)
    case (Current(_, value), Incremented(rev, step)) => State.Current(rev, value + step)
    case (Current(_, _), Initialized(rev))           => State.Current(rev, 0)
    case (other, _)                                  => other
  }

  def evaluate[F[_]](state: State, cmd: Command)(implicit F: Async[F], T: Timer[F]): F[Either[Rejection, Event]] =
    (state, cmd) match {
      case (Current(revS, _), Boom(revC, message)) if revS == revC                  => F.raiseError(new RuntimeException(message))
      case (Initial, Boom(rev, message)) if rev == 0                                => F.raiseError(new RuntimeException(message))
      case (_, Boom(rev, _))                                                        => F.pure(Left(InvalidRevision(rev)))
      case (Current(revS, _), Never(revC)) if revS == revC                          => F.never
      case (Initial, Never(rev)) if rev == 0                                        => F.never
      case (_, Never(rev))                                                          => F.pure(Left(InvalidRevision(rev)))
      case (Initial, Increment(rev, step)) if rev == 0                              => F.pure(Right(Incremented(1, step)))
      case (Initial, Increment(rev, _))                                             => F.pure(Left(InvalidRevision(rev)))
      case (Initial, IncrementAsync(rev, step, duration)) if rev == 0               =>
        T.sleep(duration) >> F.pure(Right(Incremented(1, step)))
      case (Initial, IncrementAsync(rev, _, _))                                     => F.pure(Left(InvalidRevision(rev)))
      case (Initial, Initialize(rev)) if rev == 0                                   => F.pure(Right(Initialized(1)))
      case (Initial, Initialize(rev))                                               => F.pure(Left(InvalidRevision(rev)))
      case (Current(revS, _), Increment(revC, step)) if revS == revC                => F.pure(Right(Incremented(revS + 1, step)))
      case (Current(_, _), Increment(revC, _))                                      => F.pure(Left(InvalidRevision(revC)))
      case (Current(revS, _), IncrementAsync(revC, step, duration)) if revS == revC =>
        T.sleep(duration) >> F.pure(Right(Incremented(revS + 1, step)))
      case (Current(_, _), IncrementAsync(revC, _, duration))                       =>
        T.sleep(duration) >> F.pure(Left(InvalidRevision(revC)))
      case (Current(revS, _), Initialize(revC)) if revS == revC                     => F.pure(Right(Initialized(revS + 1)))
      case (Current(_, _), Initialize(rev))                                         => F.pure(Left(InvalidRevision(rev)))
    }

} 
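Since next is a pure fold over events, replay is easy to check by hand; for instance:

import ch.epfl.bluebrain.nexus.sourcing.Event.{Incremented, Initialized}
import ch.epfl.bluebrain.nexus.sourcing.State

object AggregateFixtureUsage {
  // Replaying an event log from the initial state:
  val replayed: State =
    List(Initialized(1), Incremented(2, 5), Incremented(3, 7))
      .foldLeft(AggregateFixture.initialState)(AggregateFixture.next)
  // replayed == State.Current(3, 12)
}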
Example 39
Source File: ProjectAttributesCoordinator.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.async

import akka.actor.{ActorRef, ActorSystem}
import cats.effect.Async
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.kg.async.ProjectAttributesCoordinatorActor.Msg._
import ch.epfl.bluebrain.nexus.kg.cache.ProjectCache
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.kg.resources.{Files, OrganizationRef}
import ch.epfl.bluebrain.nexus.kg.storage.Storage.StorageOperations.FetchAttributes
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.sourcing.projections.Projections
import monix.eval.Task


// NOTE: the original class declaration (and its doc comment) was lost in
// extraction; the header below is reconstructed from the companion object's
// `new ProjectAttributesCoordinator[Task](projectCache, coordinatorRef)` call,
// and other members of the class are not shown.
class ProjectAttributesCoordinator[F[_]](projectCache: ProjectCache[F], ref: ActorRef)(implicit F: Async[F]) {

  def stop(projectRef: ProjectRef): F[Unit] = {
    ref ! Stop(projectRef.id)
    F.unit
  }
}

object ProjectAttributesCoordinator {
  def apply(files: Files[Task], projectCache: ProjectCache[Task])(implicit
      config: ServiceConfig,
      fetchAttributes: FetchAttributes[Task],
      as: ActorSystem,
      P: Projections[Task, String]
  ): ProjectAttributesCoordinator[Task] = {
    val coordinatorRef = ProjectAttributesCoordinatorActor.start(files, None, config.cluster.shards)
    new ProjectAttributesCoordinator[Task](projectCache, coordinatorRef)
  }
} 
Example 40
Source File: PostgresTransactor.scala    From ticket-booking-aecor   with Apache License 2.0 5 votes vote down vote up
package ru.pavkin.booking.common.postgres

import cats.effect.{Async, ContextShift, Resource}
import doobie.hikari.HikariTransactor
import doobie.util.ExecutionContexts
import ru.pavkin.booking.config.PostgresConfig

object PostgresTransactor {
  def transactor[F[_]](
    config: PostgresConfig
  )(implicit F: Async[F], C: ContextShift[F]): Resource[F, HikariTransactor[F]] =
    for {
      ce <- ExecutionContexts.fixedThreadPool[F](32)
      te <- ExecutionContexts.cachedThreadPool[F]
      tr <- HikariTransactor.newHikariTransactor[F](
             "org.postgresql.Driver",
             s"jdbc:postgresql://${config.contactPoints}:${config.port}/${config.database}",
             config.username,
             config.password,
             ce,
             te
           )
      _ <- Resource.liftF(tr.configure(ds => F.delay(ds.setAutoCommit(false))))
    } yield tr
} 
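A usage sketch (the PostgresConfig constructor shape is assumed from the fields read above; values are placeholders):

import cats.effect.{ContextShift, IO}
import doobie.implicits._
import ru.pavkin.booking.config.PostgresConfig

import scala.concurrent.ExecutionContext

object TransactorUsage {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  // Placeholder values; the parameter names mirror the fields read above, but
  // the actual constructor shape of PostgresConfig is assumed.
  val config = PostgresConfig(
    contactPoints = "localhost",
    port = 5432,
    database = "booking",
    username = "user",
    password = "password"
  )

  val ping: IO[Int] =
    PostgresTransactor.transactor[IO](config).use { xa =>
      sql"select 1".query[Int].unique.transact(xa)
    }
}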
Example 41
Source File: Issue455.scala    From guardrail   with MIT License 5 votes vote down vote up
package core.issues

import cats.implicits._
import org.http4s.implicits._
import org.scalatest.{ EitherValues, FunSuite, Matchers }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.SpanSugar._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import cats.effect.Async
import cats.effect.implicits._
import cats.effect.IO
import org.http4s.client.{ Client => Http4sClient }

class Issue455Suite extends FunSuite with Matchers with EitherValues with ScalaFutures {
  override implicit val patienceConfig = PatienceConfig(10 seconds, 1 second)

  test("Circe NPE: https://github.com/circe/circe/issues/561") {
    val route = {
      import issues.issue455.server.http4s.{ BooResponse, Handler, Resource }
      import issues.issue455.server.http4s.definitions.RecursiveData
      new Resource[IO].routes(new Handler[IO] {
        val recData                                                              = RecursiveData(3, "three", Some(RecursiveData(2, "two", Some(RecursiveData(1, "one", None)))))
        def boo(respond: BooResponse.type)(body: RecursiveData): IO[BooResponse] = IO.pure(respond.Ok(recData))
      })
    }
    {
      import issues.issue455.client.http4s.Client
      import issues.issue455.client.http4s.definitions.RecursiveData
      val recData = RecursiveData(3, "three", Some(RecursiveData(2, "two", Some(RecursiveData(1, "one", None)))))
      val client  = Client.httpClient(Http4sClient.fromHttpApp[IO](route.orNotFound))
      val resp    = client.boo(recData).unsafeToFuture().futureValue
      resp.fold(handleOk = {
        case `recData` => ()
        case data      => fail(s"${data} != ${recData}")
      })
    }
  }
} 
Example 42
Source File: AkkaStreamProcess.scala    From aecor   with MIT License 5 votes vote down vote up
package aecor.example.process

import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M],
                 materializer: Materializer)(implicit F: Async[F]): F[Unit] =
      F.bracket(
        F.delay(
          source
            .viaMat(KillSwitches.single)(Keep.right)
            .toMat(Sink.ignore)(Keep.both)
            .run()(materializer)
        )
      )(x => F.fromFuture(x._2).void)(x => F.delay(x._1.shutdown()))

  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
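A sketch of building a process from a ticking source (the source and interval are arbitrary; the Materializer would come from the enclosing ActorSystem):

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import cats.effect.IO

import scala.concurrent.duration._

object AkkaStreamProcessUsage {
  // The returned IO completes when the stream terminates; cancellation or an
  // error triggers the bracket's release step, which trips the kill switch.
  def tickingProcess(implicit mat: Materializer): IO[Unit] =
    AkkaStreamProcess[IO](Source.tick(1.second, 1.second, ()), mat)
}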
Example 43
Source File: Algebras.scala    From hydra   with Apache License 2.0 4 votes vote down vote up
package hydra.ingest.modules

import cats.effect.{Async, ConcurrentEffect, ContextShift, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.ingest.app.AppConfig.AppConfig
import hydra.kafka.algebras.{KafkaAdminAlgebra, KafkaClientAlgebra, MetadataAlgebra}
import io.chrisdavenport.log4cats.Logger

final class Algebras[F[_]] private (
    val schemaRegistry: SchemaRegistry[F],
    val kafkaAdmin: KafkaAdminAlgebra[F],
    val kafkaClient: KafkaClientAlgebra[F],
    val metadata: MetadataAlgebra[F]
)

object Algebras {

  def make[F[_]: Async: ConcurrentEffect: ContextShift: Timer: Logger](config: AppConfig): F[Algebras[F]] =
    for {
      schemaRegistry <- SchemaRegistry.live[F](
        config.createTopicConfig.schemaRegistryConfig.fullUrl,
        config.createTopicConfig.schemaRegistryConfig.maxCacheSize
      )
      kafkaAdmin <- KafkaAdminAlgebra.live[F](config.createTopicConfig.bootstrapServers)
      kafkaClient <- KafkaClientAlgebra.live[F](config.createTopicConfig.bootstrapServers, schemaRegistry, config.ingestConfig.recordSizeLimitBytes)
      metadata <- MetadataAlgebra.make[F](config.v2MetadataTopicConfig.topicName.value,
        config.v2MetadataTopicConfig.consumerGroup, kafkaClient, schemaRegistry, config.v2MetadataTopicConfig.createOnStartup)
    } yield new Algebras[F](schemaRegistry, kafkaAdmin, kafkaClient, metadata)
}
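A wiring sketch (appConfig is a placeholder; the implicit instances are the ones an IOApp plus log4cats would normally supply):

import cats.effect.{ContextShift, IO, Timer}
import hydra.ingest.app.AppConfig.AppConfig
import io.chrisdavenport.log4cats.Logger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger

import scala.concurrent.ExecutionContext

object AlgebrasUsage {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)
  implicit val logger: Logger[IO]   = Slf4jLogger.getLogger[IO]

  // appConfig would normally be loaded by hydra's own configuration layer.
  def wire(appConfig: AppConfig): IO[Algebras[IO]] = Algebras.make[IO](appConfig)
}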