cats.effect.Concurrent Scala Examples

The following examples show how to use cats.effect.Concurrent in open-source Scala projects. Each example lists its source file, the project it comes from, and that project's license.
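Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the core operations most of these examples build on: starting a fiber with Concurrent[F].start and joining it. It assumes cats-effect 2.x, which all of the examples below target; the object and method names are illustrative only.

import cats.effect.{Concurrent, ContextShift, IO}
import cats.syntax.flatMap._
import cats.syntax.functor._

import scala.concurrent.ExecutionContext

object ConcurrentBasics {
  // Run `fa` on its own fiber, run `fb` on the current one, then join both results.
  def forkJoin[F[_], A, B](fa: F[A], fb: F[B])(implicit F: Concurrent[F]): F[(A, B)] =
    for {
      fiber <- F.start(fa) // begin fa concurrently
      b     <- fb          // do other work in the meantime
      a     <- fiber.join  // wait for fa's result
    } yield (a, b)

  // Specialized to IO: Concurrent[IO] is derived from this ContextShift.
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val program: IO[(Int, String)] = forkJoin(IO(21 * 2), IO("done"))
}

The same Concurrent[F] constraint powers the start, race, racePair, memoize, and cancelable calls in the examples that follow.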
Example 1
Source File: Memoize.scala    From tofu   with Apache License 2.0
package tofu.memo

import cats.effect.{Concurrent, ExitCase}
import cats.effect.concurrent.{Deferred, Ref}
import simulacrum.typeclass
import tofu.syntax.monadic._
import cats.syntax.option._
import cats.effect.syntax.concurrent._
import cats.effect.syntax.bracket._


@typeclass
trait Memoize[F[_]] {
  def memoize[A](fa: F[A]): F[F[A]]

  def memoizeOnSuccess[A](fa: F[A]): F[F[A]]
}

object Memoize {
  def concurrentMemoize[F[_]](implicit F: Concurrent[F]): Memoize[F] =
    new Memoize[F] {
      def memoize[A](fa: F[A]): F[F[A]] = Concurrent.memoize(fa)

      // Copy of Concurrent.memoize that caches only successful results
      def memoizeOnSuccess[A](f: F[A]): F[F[A]] = {
        {
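          // State machine: Subs(n) = the computation is running with n
          // subscribers waiting on it; Done = it completed successfully.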
          sealed trait State
          case class Subs(n: Int) extends State
          case object Done        extends State

          case class Fetch(state: State, v: Deferred[F, A], stop: Deferred[F, F[Unit]])

          Ref[F].of(Option.empty[Fetch]).map { state =>
            (Deferred[F, A] product Deferred[F, F[Unit]]).flatMap {
              case (v, stop) =>
                def endState(ec: ExitCase[Throwable]) =
                  state.modify {
                    case None                          => throw new AssertionError("unreachable")
                    case s @ Some(Fetch(Done, _, _))   => s -> F.unit
                    case Some(Fetch(Subs(n), v, stop)) =>
                      if (ec == ExitCase.Canceled && n == 1) None -> stop.get.flatten
                      else if (ec == ExitCase.Canceled) Fetch(Subs(n - 1), v, stop).some -> F.unit
                      else Fetch(Done, v, stop).some                                     -> F.unit
                  }.flatten

                def fetch =
                  f.flatMap(v.complete)
                    .start
                    .flatMap(fiber => stop.complete(fiber.cancel))

                state.modify {
                  case s @ Some(Fetch(Done, v, _))   =>
                    s -> v.get
                  case Some(Fetch(Subs(n), v, stop)) =>
                    Fetch(Subs(n + 1), v, stop).some -> v.get.guaranteeCase(endState)
                  case None                          =>
                    Fetch(Subs(1), v, stop).some -> fetch.bracketCase(_ => v.get) { case (_, ec) => endState(ec) }
                }.flatten
            }
          }
        }
      }
    }
} 
Example 2
Source File: MessageSocket.scala    From fs2-chat   with MIT License
package fs2chat

import cats.effect.Concurrent
import cats.implicits._
import fs2.Stream
import fs2.concurrent.Queue
import fs2.io.tcp.Socket
import scodec.{Decoder, Encoder}
import scodec.stream.{StreamDecoder, StreamEncoder}


trait MessageSocket[F[_], In, Out] {
  def read: Stream[F, In]
  def write1(out: Out): F[Unit]
}

object MessageSocket {

  def apply[F[_]: Concurrent, In, Out](
      socket: Socket[F],
      inDecoder: Decoder[In],
      outEncoder: Encoder[Out],
      outputBound: Int
  ): F[MessageSocket[F, In, Out]] =
    for {
      outgoing <- Queue.bounded[F, Out](outputBound)
    } yield new MessageSocket[F, In, Out] {
      def read: Stream[F, In] = {
        val readSocket = socket
          .reads(1024)
          .through(StreamDecoder.many(inDecoder).toPipeByte[F])

        val writeOutput = outgoing.dequeue
          .through(StreamEncoder.many(outEncoder).toPipeByte)
          .through(socket.writes(None))

        readSocket.concurrently(writeOutput)
      }
      def write1(out: Out): F[Unit] = outgoing.enqueue1(out)
    }
} 
Example 3
Source File: Timeout.scala    From tofu   with Apache License 2.0
package tofu

import cats.effect.{Concurrent, ContextShift, IO, Timer}
import simulacrum.typeclass
import tofu.syntax.feither._
import tofu.internal.NonTofu

import scala.concurrent.duration.FiniteDuration

@typeclass
trait Timeout[F[_]] {
  def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A]
}

object Timeout extends LowPriorTimeoutImplicits {
  implicit def io(implicit timer: Timer[IO], cs: ContextShift[IO]): Timeout[IO] = new Timeout[IO] {
    override def timeoutTo[A](fa: IO[A], after: FiniteDuration, fallback: IO[A]): IO[A] = fa.timeoutTo(after, fallback)
  }
}

trait LowPriorTimeoutImplicits { self: Timeout.type =>
  implicit def concurrent[F[_]: NonTofu](implicit F: Concurrent[F], timer: Timer[F]): Timeout[F] =
    new Timeout[F] {
      override def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A] =
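        // If the sleep wins the race (Left), run the fallback; otherwise return fa's result.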
        F.race(timer.sleep(after), fa).getOrElseF(fallback)
    }
} 
Example 4
Source File: Fire.scala    From tofu   with Apache License 2.0
package tofu

import cats.effect.{Concurrent, Fiber}
import simulacrum.typeclass
import tofu.internal.NonTofu

import scala.util.Either
import scala.annotation.nowarn

@typeclass
trait Fire[F[_]] {
  def fireAndForget[A](fa: F[A]): F[Unit]
}

object Fire extends StartInstances[Fire]

@typeclass
trait Race[F[_]] extends Fire[F] {
  def race[A, B](fa: F[A], fb: F[B]): F[Either[A, B]]
  def never[A]: F[A]
}

object Race extends StartInstances[Race]

@typeclass
trait Start[F[_]] extends Fire[F] with Race[F] {
  def start[A](fa: F[A]): F[Fiber[F, A]]
  def racePair[A, B](fa: F[A], fb: F[B]): F[Either[(A, Fiber[F, B]), (Fiber[F, A], B)]]
}

object Start extends StartInstances[Start]

trait StartInstances[TC[f[_]] >: Start[f]] {
  final implicit def concurrentInstance[F[_]](implicit F: Concurrent[F], @nowarn _nonTofu: NonTofu[F]): TC[F] =
    new Start[F] {
      def start[A](fa: F[A]): F[Fiber[F, A]]                                                = F.start(fa)
      def fireAndForget[A](fa: F[A]): F[Unit]                                               = F.void(start(fa))
      def racePair[A, B](fa: F[A], fb: F[B]): F[Either[(A, Fiber[F, B]), (Fiber[F, A], B)]] = F.racePair(fa, fb)
      def race[A, B](fa: F[A], fb: F[B]): F[Either[A, B]]                                   = F.race(fa, fb)
      def never[A]: F[A]                                                                    = F.never
    }

} 
Example 5
Source File: MakeSemaphore.scala    From tofu   with Apache License 2.0
package tofu.concurrent

import cats.effect.concurrent.Semaphore
import cats.effect.{Concurrent, Sync}

trait MakeSemaphore[I[_], F[_]] {
  def semaphore(count: Long): I[Semaphore[F]]
}

object Semaphores {
  def apply[F[_]](implicit agents: Semaphores[F]): MakeSemaphore.Applier[F, F] = new MakeSemaphore.Applier[F, F](agents)
}

object MakeSemaphore {
  def apply[I[_], F[_]](implicit mksem: MakeSemaphore[I, F]) = new Applier[I, F](mksem)

  final class Applier[I[_], F[_]](private val mksem: MakeSemaphore[I, F]) extends AnyVal {
    def of(count: Long): I[Semaphore[F]] = mksem.semaphore(count)
  }

  implicit def concurrentSemaphore[I[_]: Sync, F[_]: Concurrent]: MakeSemaphore[I, F] = new MakeSemaphore[I, F] {
    def semaphore(count: Long): I[Semaphore[F]] = Semaphore.in[I, F](count)
  }
} 
Example 6
Source File: MakeDeferred.scala    From tofu   with Apache License 2.0
package tofu.concurrent

import cats.effect.concurrent.{Deferred, TryableDeferred}
import cats.effect.{Concurrent, Sync}

trait MakeDeferred[I[_], F[_]] {
  def deferred[A]: I[Deferred[F, A]]
}

trait TryableDeferreds[F[_]] extends MakeDeferred[F, F] {
  def tryable[A]: F[TryableDeferred[F, A]]
}

object Deferreds {
  def apply[F[_], A](implicit make: Deferreds[F]): F[Deferred[F, A]] = make.deferred[A]
}

object MakeDeferred extends PolymorphicMakeDefferedInstance {
  def apply[I[_], F[_], A](implicit make: MakeDeferred[I, F]): I[Deferred[F, A]] = make.deferred[A]

  def tryable[F[_], A](implicit make: TryableDeferreds[F]): F[TryableDeferred[F, A]] = make.tryable[A]

  implicit def concurrentTryableDeferreds[F[_]: Concurrent]: TryableDeferreds[F] = new TryableDeferreds[F] {
    def deferred[A]: F[Deferred[F, A]]       = Deferred[F, A]
    def tryable[A]: F[TryableDeferred[F, A]] = Deferred.tryable[F, A]
  }
}
trait PolymorphicMakeDefferedInstance {
  implicit def concurrentMakeDeferred[I[_]: Sync, F[_]: Concurrent]: MakeDeferred[I, F] = new MakeDeferred[I, F] {
    def deferred[A]: I[Deferred[F, A]] = Deferred.in[I, F, A]
  }
} 
Example 7
Source File: MakeMVar.scala    From tofu   with Apache License 2.0
package tofu.concurrent

import cats.effect.concurrent.MVar
import cats.effect.{Concurrent, Sync}

trait MakeMVar[I[_], F[_]] {
  def mvarOf[A](a: A): I[MVar[F, A]]
  def mvarEmpty[A]: I[MVar[F, A]]
}

object MVars {
  def apply[F[_]](implicit agents: MVars[F]): MakeMVar.Applier[F, F] = new MakeMVar.Applier[F, F](agents)
}

object MakeMVar {
  def apply[I[_], F[_]](implicit mkvar: MakeMVar[I, F]) = new Applier[I, F](mkvar)

  final class Applier[I[_], F[_]](private val makeMVar: MakeMVar[I, F]) extends AnyVal {
    def empty[A]: I[MVar[F, A]]    = makeMVar.mvarEmpty[A]
    def of[A](a: A): I[MVar[F, A]] = makeMVar.mvarOf(a)
  }

  implicit def concurrentMakeMVar[I[_]: Sync, F[_]: Concurrent]: MakeMVar[I, F] = new MakeMVar[I, F] {
    def mvarOf[A](a: A): I[MVar[F, A]] = MVar.in[I, F, A](a)
    def mvarEmpty[A]: I[MVar[F, A]]    = MVar.emptyIn[I, F, A]
  }
} 
Example 8
Source File: QVar.scala    From tofu   with Apache License 2.0
package tofu.concurrent
import cats.effect.concurrent.MVar
import cats.effect.{Concurrent, Sync}
import cats.Applicative
import tofu.Guarantee
import tofu.concurrent.QVar.QVarByMVar
import tofu.higherKind.{RepresentableK, derived}
import tofu.syntax.monadic._


trait QVar[F[_], A] {
  def isEmpty: F[Boolean]
  def put(a: A): F[Unit]
  def take: F[A]
  def read: F[A]
}

object QVar {
  implicit def representableK[A]: RepresentableK[QVar[*[_], A]] = derived.genRepresentableK[QVar[*[_], A]]

  final implicit class QVarOps[F[_], A](private val self: QVar[F, A]) extends AnyVal {
    def toAtom(implicit F: Applicative[F], FG: Guarantee[F]): Atom[F, A] = Atom.QAtom(self)
  }

  final case class QVarByMVar[F[_], A](mvar: MVar[F, A]) extends QVar[F, A] {
    override def isEmpty: F[Boolean] = mvar.isEmpty
    override def put(a: A): F[Unit]  = mvar.put(a)
    override def take: F[A]          = mvar.take
    override def read: F[A]          = mvar.read
  }
}

trait MakeQVar[I[_], F[_]] {
  def qvarOf[A](a: A): I[QVar[F, A]]
  def qvarEmpty[A]: I[QVar[F, A]]
}

object QVars {
  def apply[F[_]](implicit qvars: QVars[F]): MakeQVar.Applier[F, F] = new MakeQVar.Applier(qvars)
}

object MakeQVar {
  def apply[I[_], F[_]](implicit mkvar: MakeQVar[I, F]) = new Applier[I, F](mkvar)

  final class Applier[I[_], F[_]](private val makeMVar: MakeQVar[I, F]) extends AnyVal {
    def empty[A]: I[QVar[F, A]]    = makeMVar.qvarEmpty[A]
    def of[A](a: A): I[QVar[F, A]] = makeMVar.qvarOf(a)
  }

  implicit def concurrentMakeMVar[I[_]: Sync, F[_]: Concurrent]: MakeQVar[I, F] = new MakeQVar[I, F] {
    def qvarOf[A](a: A): I[QVar[F, A]] = MVar.in[I, F, A](a).map(QVarByMVar(_))
    def qvarEmpty[A]: I[QVar[F, A]]    = MVar.emptyIn[I, F, A].map(QVarByMVar(_))
  }
} 
Example 9
Source File: traverse.scala    From tofu   with Apache License 2.0
package tofu
package concurrent
package syntax
import cats.effect.Concurrent
import cats.effect.concurrent.Semaphore
import cats.syntax.parallel._
import cats.{Parallel, Traverse}
import tofu.syntax.monadic._

object traverse {
  implicit final class TraverseOps[T[_], A](val ta: T[A]) extends AnyVal {

    @deprecated("Duplicates cats.effect.syntax.ParallelNSyntax of cats-effect 2.0.0", "0.6.3")
    def limitedTraverse[F[_], B](
        batchSize: Int
    )(f: A => F[B])(implicit T: Traverse[T], F: Concurrent[F], P: Parallel[F]): F[T[B]] =
      for {
        semaphore <- Semaphore[F](batchSize.toLong)
        result    <- ta.parTraverse(value => semaphore.withPermit(f(value)))
      } yield result
  }
} 
Example 10
Source File: package.scala    From fs2-aws   with MIT License
package fs2.aws

import cats.effect.Concurrent
import cats.effect.concurrent.Ref
import cats.implicits._
import fs2.concurrent.Queue
import fs2.{ Pipe, Stream }

package object core {

  
  def groupBy[F[_], A, K](
    selector: A => F[K]
  )(implicit F: Concurrent[F]): Pipe[F, A, (K, Stream[F, A])] = { in =>
    Stream.eval(Ref.of[F, Map[K, Queue[F, Option[A]]]](Map.empty)).flatMap { queueMap =>
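      // One unbounded queue per key; enqueueing None ends that key's substream.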
      val cleanup = {
        queueMap.get.flatMap(_.values.toList.traverse_(_.enqueue1(None)))
      }

      (in ++ Stream.eval_(cleanup))
        .evalMap { elem =>
          (selector(elem), queueMap.get).mapN { (key, queues) =>
            queues
              .get(key)
              .fold {
                for {
                  newQ <- Queue.unbounded[F, Option[A]] // Create a new queue
                  _    <- queueMap.modify(queues => (queues + (key -> newQ), queues))
                  _ <- newQ.enqueue1(
                        elem.some
                      ) // Enqueue the element lifted into an Option to the new queue
                } yield (key -> newQ.dequeue.unNoneTerminate).some
              }(_.enqueue1(elem.some) as None)
          }.flatten
        }
        .unNone
        .onFinalize(cleanup)
    }
  }
} 
Example 11
Source File: Channel.scala    From aecor   with MIT License
package aecor.kafkadistributedprocessing.internal

import aecor.kafkadistributedprocessing.internal
import aecor.kafkadistributedprocessing.internal.Channel.CompletionCallback
import cats.effect.Concurrent
import cats.effect.concurrent.Deferred
import cats.effect.implicits._
import cats.implicits._

private[kafkadistributedprocessing] final case class Channel[F[_]](watch: F[CompletionCallback[F]],
                                                                   close: F[Unit],
                                                                   call: F[Unit])

private[kafkadistributedprocessing] object Channel {
  type CompletionCallback[F[_]] = F[Unit]
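  // `call` blocks until a watcher obtains the completion callback via `watch`
  // and runs it, or until the channel is closed.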
  def create[F[_]: Concurrent]: F[Channel[F]] =
    for {
      deferredCallback <- Deferred[F, CompletionCallback[F]]
      closed <- Deferred[F, Unit]
      close = closed.complete(())
      watch = deferredCallback.get
      call = Deferred[F, Unit]
        .flatMap { deferredCompletion =>
          deferredCallback
            .complete(deferredCompletion.complete(()).attempt.void) >> deferredCompletion.get
        }
        .race(closed.get)
        .void

    } yield internal.Channel(watch, close, call)
} 
Example 12
Source File: FS2QueueProcess.scala    From aecor   with MIT License
package aecor.example.process

import aecor.distributedprocessing.DistributedProcessing.{ Process, RunningProcess }
import cats.effect.Concurrent
import cats.effect.concurrent.Deferred
import cats.implicits._
import fs2._
import fs2.concurrent.Queue
import cats.effect.implicits._

object FS2QueueProcess {
  def create[F[_]: Concurrent, A](
    sources: List[Stream[F, A]]
  ): F[(Stream[F, Stream[F, A]], List[Process[F]])] =
    for {
      queue <- Queue.bounded[F, Stream[F, A]](sources.length)
      processes = sources.map { s =>
        Process {
          Deferred[F, Either[Throwable, Unit]].flatMap { stopped =>
            queue
              .enqueue1(s.interruptWhen(stopped))
              .flatTap(_ => stopped.get)
              .start
              .map { fiber =>
                RunningProcess(fiber.join, stopped.complete(Right(())))
              }
          }
        }
      }
    } yield (queue.dequeue, processes)
} 
Example 13
Source File: DefaultTransactionService.scala    From aecor   with MIT License
package aecor.example.transaction
import aecor.example.account.AccountId
import aecor.example.common.Amount
import aecor.example.transaction.TransactionRoute.ApiResult
import aecor.example.transaction.transaction.Transactions
import cats.effect.{ Concurrent, Timer }
import cats.implicits._

import scala.concurrent.duration._

final class DefaultTransactionService[F[_]](transactions: Transactions[F])(
  implicit F: Concurrent[F],
  timer: Timer[F]
) extends TransactionService[F] {

  def authorizePayment(transactionId: TransactionId,
                       from: From[AccountId],
                       to: To[AccountId],
                       amount: Amount): F[TransactionRoute.ApiResult] =
    transactions(transactionId)
      .create(from, to, amount)
      .flatMap { _ =>
        val getTransaction = transactions(transactionId).getInfo
          .flatMap {
            case Right(t) => t.pure[F]
            case _ =>
              F.raiseError[Algebra.TransactionInfo](new IllegalStateException("Something went bad"))
          }
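        // Poll every 10 ms until the transaction reports an outcome;
        // Concurrent.timeout below bounds the whole wait to 10 seconds.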
        def loop: F[Boolean] = getTransaction.flatMap {
          case Algebra.TransactionInfo(_, _, _, Some(value)) => value.pure[F]
          case _                                             => timer.sleep(10.millis) >> loop
        }
        Concurrent.timeout(loop, 10.seconds)
      }
      .map { succeeded =>
        if (succeeded) {
          ApiResult.Authorized
        } else {
          ApiResult.Declined("You suck")
        }
      }
}

object DefaultTransactionService {
  def apply[F[_]](transactions: Transactions[F])(implicit F: Concurrent[F],
                                                 timer: Timer[F]): TransactionService[F] =
    new DefaultTransactionService[F](transactions)
} 
Example 14
Source File: IOSuite.scala    From skafka   with MIT License
package com.evolutiongaming.skafka

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 10.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallelIO: Parallel[IO]             = IO.ioParallel
  implicit val fromFutureIO: FromFuture[IO]         = FromFuture.lift[IO]
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock[IO](Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
Example 15
Source File: ConsumerOf.scala    From skafka   with MIT License
package com.evolutiongaming.skafka.consumer

import cats.effect.{Bracket, Concurrent, ContextShift, Resource}
import cats.{Applicative, Defer, ~>}
import com.evolutiongaming.catshelper.{ToFuture, ToTry}
import com.evolutiongaming.skafka.FromBytes
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.ExecutionContext

trait ConsumerOf[F[_]] {

  def apply[K, V](
    config: ConsumerConfig)(implicit
    fromBytesK: FromBytes[F, K],
    fromBytesV: FromBytes[F, V]
  ): Resource[F, Consumer[F, K, V]]
}

object ConsumerOf {

  def apply[F[_] : Concurrent : ContextShift : ToTry : ToFuture : MeasureDuration](
    executorBlocking: ExecutionContext,
    metrics: Option[ConsumerMetrics[F]] = None
  ): ConsumerOf[F] = new ConsumerOf[F] {

    def apply[K, V](
      config: ConsumerConfig)(implicit
      fromBytesK: FromBytes[F, K],
      fromBytesV: FromBytes[F, V]
    ) = {
      for {
        consumer <- Consumer.of[F, K, V](config, executorBlocking)
      } yield {
        metrics.fold(consumer)(consumer.withMetrics[Throwable])
      }
    }
  }


  implicit class ConsumerOfOps[F[_]](val self: ConsumerOf[F]) extends AnyVal {

    def mapK[G[_] : Applicative : Defer](
      fg: F ~> G,
      gf: G ~> F)(implicit
      B: Bracket[F, Throwable]
    ): ConsumerOf[G] = new ConsumerOf[G] {

      def apply[K, V](
        config: ConsumerConfig)(implicit
        fromBytesK: FromBytes[G, K],
        fromBytesV: FromBytes[G, V]
      ) = {
        for {
          a <- self[K, V](config)(fromBytesK.mapK(gf), fromBytesV.mapK(gf)).mapK(fg)
        } yield {
          a.mapK(fg, gf)
        }
      }
    }
  }
} 
Example 16
Source File: ConditionalLogger.scala    From odin   with Apache License 2.0
package io.odin.extras.loggers

import cats.MonadError
import cats.effect.{Concurrent, ContextShift, ExitCase, Resource, Timer}
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.functor._
import cats.syntax.order._
import io.odin.loggers.DefaultLogger
import io.odin.{Level, Logger, LoggerMessage}
import monix.catnap.ConcurrentQueue
import monix.execution.{BufferCapacity, ChannelType}

final case class ConditionalLogger[F[_]: Timer] private (
    queue: ConcurrentQueue[F, LoggerMessage],
    inner: Logger[F],
    override val minLevel: Level
)(implicit F: MonadError[F, Throwable])
    extends DefaultLogger[F](minLevel) {

  def log(msg: LoggerMessage): F[Unit] =
    queue.tryOffer(msg).void

  private def drain(exitCase: ExitCase[Throwable]): F[Unit] = {
    val level = exitCase match {
      case ExitCase.Completed => inner.minLevel
      case _                  => minLevel
    }

    queue
      .drain(0, Int.MaxValue)
      .flatMap(msgs => inner.log(msgs.filter(_.level >= level).toList))
      .attempt
      .void
  }

}

object ConditionalLogger {

  
  def create[F[_]: Timer: Concurrent: ContextShift](
      inner: Logger[F],
      minLevelOnError: Level,
      maxBufferSize: Option[Int]
  ): Resource[F, Logger[F]] = {

    val queueCapacity = maxBufferSize match {
      case Some(value) => BufferCapacity.Bounded(value)
      case None        => BufferCapacity.Unbounded()
    }

    def acquire: F[ConditionalLogger[F]] =
      for {
        queue <- ConcurrentQueue.withConfig[F, LoggerMessage](queueCapacity, ChannelType.MPSC)
      } yield ConditionalLogger(queue, inner, minLevelOnError)

    def release(logger: ConditionalLogger[F], exitCase: ExitCase[Throwable]): F[Unit] =
      logger.drain(exitCase)

    Resource.makeCase(acquire)(release).widen
  }

} 
Example 17
Source File: implicits.scala    From iotchain   with MIT License
package jbok.network.tcp

import cats.effect.{Concurrent, ContextShift, IO, Sync}
import cats.implicits._
import fs2.Chunk
import fs2.io.tcp.Socket
import javax.net.ssl.SSLContext
import jbok.common.thread.ThreadUtil
import jbok.crypto.ssl.SSLContextHelper
import jbok.network.Message
import spinoco.fs2.crypto.io.tcp.TLSSocket

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object implicits {
  val maxBytes: Int           = 4 * 1024 * 1024
  val timeout                 = Some(10.seconds)
  val sslEC: ExecutionContext = ThreadUtil.blockingThreadPool[IO]("jbok-tls").allocated.unsafeRunSync()._1

  implicit class TcpSocketOps[F[_]](val socket: Socket[F]) extends AnyVal {
    def readMessage(implicit F: Sync[F]): F[Message[F]] =
      socket.read(maxBytes, timeout).flatMap {
        case Some(chunk) => Message.decodeChunk(chunk)
        case None        => F.raiseError(new Exception(s"socket already closed"))
      }

    def writeMessage(message: Message[F]): F[Unit] =
      socket.write(Chunk.array(Message.encodeBytes(message).byteArray), timeout)

    def toTLSSocket(sslOpt: Option[SSLContext], client: Boolean)(implicit F: Concurrent[F], cs: ContextShift[F]): F[Socket[F]] =
      sslOpt match {
        case Some(ssl) =>
          if (client) TLSSocket.instance(socket, SSLContextHelper.clientEngine(ssl).engine, sslEC).widen[Socket[F]]
          else TLSSocket.instance(socket, SSLContextHelper.serverEngine(ssl).engine, sslEC).widen[Socket[F]]
        case None => F.pure(socket)
      }
  }
} 
Example 18
Source File: TimedOut.scala    From ticket-booking-aecor   with Apache License 2.0
package ru.pavkin.booking.common.effect
import cats.effect.{Concurrent, Timer}
import cats.~>

import scala.concurrent.TimeoutException
import scala.concurrent.duration.FiniteDuration

object TimedOut {
  def apply[F[_]](timeout: FiniteDuration)(implicit timer: Timer[F], F: Concurrent[F]): F ~> F =
    new (F ~> F) {
      def apply[A](fa: F[A]): F[A] =
        Concurrent.timeoutTo(
          fa,
          timeout,
          F.raiseError(new TimeoutException(s"Call timed out after $timeout"))
        )
    }
} 
Example 19
Source File: AbstractMessageSocket.scala    From skunk   with MIT License
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package skunk.net

import skunk.net.message.BackendMessage
import skunk.util.Origin
import cats.effect.Concurrent
import cats.implicits._
import skunk.exception.ProtocolError

abstract class AbstractMessageSocket[F[_]: Concurrent]
  extends MessageSocket[F] {

  override def expect[B](f: PartialFunction[BackendMessage, B])(implicit or: Origin): F[B] =
    receive.flatMap { m =>
      if (f.isDefinedAt(m)) f(m).pure[F]
      else Concurrent[F].raiseError(new ProtocolError(m, or))
    }

  override def flatExpect[B](f: PartialFunction[BackendMessage, F[B]])(implicit or: Origin): F[B] =
    expect(f).flatten
}
Example 20
Source File: implicits.scala    From sttp   with Apache License 2.0
package sttp.client.impl.cats

import cats.effect.Concurrent
import cats.~>
import sttp.client.monad.{MonadAsyncError, MonadError}
import sttp.client.ws.WebSocketResponse
import sttp.client.{Request, Response, SttpBackend}

import scala.language.higherKinds

object implicits extends CatsImplicits

trait CatsImplicits extends LowLevelCatsImplicits {
  implicit final def sttpBackendToCatsMappableSttpBackend[R[_], S, WS_HANDLER[_]](
      sttpBackend: SttpBackend[R, S, WS_HANDLER]
  ): MappableSttpBackend[R, S, WS_HANDLER] = new MappableSttpBackend(sttpBackend)

  implicit final def asyncMonadError[F[_]: Concurrent]: MonadAsyncError[F] = new CatsMonadAsyncError[F]
}

trait LowLevelCatsImplicits {
  implicit final def catsMonadError[F[_]](implicit E: cats.MonadError[F, Throwable]): MonadError[F] =
    new CatsMonadError[F]
}

final class MappableSttpBackend[F[_], S, WS_HANDLER[_]] private[cats] (
    private val sttpBackend: SttpBackend[F, S, WS_HANDLER]
) extends AnyVal {
  def mapK[G[_]: MonadError](f: F ~> G): SttpBackend[G, S, WS_HANDLER] =
    new MappedKSttpBackend(sttpBackend, f, implicitly)
}

private[cats] final class MappedKSttpBackend[F[_], -S, WS_HANDLER[_], G[_]](
    wrapped: SttpBackend[F, S, WS_HANDLER],
    mapping: F ~> G,
    val responseMonad: MonadError[G]
) extends SttpBackend[G, S, WS_HANDLER] {
  def send[T](request: Request[T, S]): G[Response[T]] = mapping(wrapped.send(request))

  override def openWebsocket[T, WS_RESULT](
      request: Request[T, S],
      handler: WS_HANDLER[WS_RESULT]
  ): G[WebSocketResponse[WS_RESULT]] = mapping(wrapped.openWebsocket(request, handler))

  def close(): G[Unit] = mapping(wrapped.close())
} 
Example 21
Source File: package.scala    From fs2-cron   with Apache License 2.0
package eu.timepit

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit

import cats.ApplicativeError
import cats.effect.{Concurrent, Sync, Timer}
import cron4s.expr.CronExpr
import cron4s.lib.javatime._
import fs2.Stream

import scala.concurrent.duration.FiniteDuration

package object fs2cron {

  
  def sleepCron[F[_]: Sync](cronExpr: CronExpr)(implicit timer: Timer[F]): Stream[F, Unit] =
    durationFromNow(cronExpr).flatMap(Stream.sleep[F])

  def schedule[F[_]: Concurrent, A](tasks: List[(CronExpr, Stream[F, A])])(implicit
      timer: Timer[F]
  ): Stream[F, A] = {
    val scheduled = tasks.map { case (cronExpr, task) => awakeEveryCron[F](cronExpr) >> task }
    Stream.emits(scheduled).covary[F].parJoinUnbounded
  }
} 
Example 22
Source File: Publish.scala    From polynote   with Apache License 2.0
package polynote.kernel.util

import cats.{FlatMap, Monad}
import cats.effect.Concurrent
import cats.syntax.flatMap._
import fs2.Pipe
import fs2.concurrent.{Enqueue, Topic}
import zio.{IO, Task, ZIO, ZQueue}
import zio.stream.Take


trait Publish[F[+_], -T] {

  def publish1(t: T): F[Unit]

  def publish: Pipe[F, T, Unit] = _.evalMap(publish1)

  def contramap[U](fn: U => T): Publish[F, U] = new Publish[F, U] {
    override def publish1(t: U): F[Unit] = Publish.this.publish1(fn(t))
    override def publish: Pipe[F, U, Unit] = {
      stream => Publish.this.publish(stream.map(fn))
    }
  }

  def contraFlatMap[U](fn: U => F[T])(implicit F: Monad[F]): Publish[F, U] = new Publish[F, U] {
    override def publish1(t: U): F[Unit] = fn(t).flatMap(u => Publish.this.publish1(u))
    override def publish: Pipe[F, U, Unit] = stream => stream.evalMap(publish1)
  }

  def some[U](implicit ev: Option[U] <:< T): Publish[F, U] = contramap[U](u => ev(Option(u)))

  def tap[T1 <: T](into: T1 => F[Unit])(implicit F: FlatMap[F]): Publish[F, T1] = new Publish[F, T1] {
    def publish1(t: T1): F[Unit] = Publish.this.publish1(t).flatMap(_ => into(t))
    override def publish: Pipe[F, T1, Unit] = stream => stream.evalTap(into).through(Publish.this.publish)
  }

  def tap[T1 <: T](into: Publish[F, T1])(implicit F: Concurrent[F]): Publish[F, T1] = new Publish[F, T1] {
    def publish1(t: T1): F[Unit] = Publish.this.publish1(t).flatMap(_ => into.publish1(t))
    override def publish: Pipe[F, T1, Unit] = stream => stream.broadcastThrough(Publish.this.publish, into.publish)
  }

}

object Publish {

  // Allow a Topic to be treated as a Publish
  final case class PublishTopic[F[+_], -T, T1 >: T](topic: Topic[F, T1]) extends Publish[F, T] {
    override def publish1(t: T): F[Unit] = topic.publish1(t)
    override def publish: Pipe[F, T, Unit] = topic.publish
  }

  implicit def topicToPublish[F[+_], T](topic: Topic[F, T]): Publish[F, T] = PublishTopic(topic)

  def apply[F[+_], T](topic: Topic[F, T]): Publish[F, T] = topic

  final case class PublishEnqueue[F[+_], -T, T1 >: T](queue: Enqueue[F, T1]) extends Publish[F, T] {
    override def publish1(t: T): F[Unit] = queue.enqueue1(t)
    override def publish: Pipe[F, T, Unit] = queue.enqueue
  }

  implicit def enqueueToPublish[F[+_], T, T1 <: T](enqueue: Enqueue[F, T]): Publish[F, T1] = PublishEnqueue(enqueue)

  def apply[F[+_], T](enqueue: Enqueue[F, T]): Publish[F, T] = enqueue

  def fn[F[+_], T](fn: T => F[Unit]): Publish[F, T] = new Publish[F, T] {
    override def publish1(t: T): F[Unit] = fn(t)
  }

  final case class PublishZQueueTake[RA, EA, RB, EB, ET <: EA, A, B](queue: ZQueue[RA, EA, RB, EB, Take[ET, A], B]) extends Publish[ZIO[RA, EA, +?], A] {
    override def publish1(t: A): ZIO[RA, EA, Unit] = queue.offer(Take.Value(t)).doUntil(identity).unit
  }

  implicit def zqueueTakeToPublish[RA, EA, RB, EB, ET <: EA, A, B](queue: ZQueue[RA, EA, RB, EB, Take[ET, A], B]): Publish[ZIO[RA, EA, +?], A] = PublishZQueueTake(queue)

  def apply[E, E1 <: E, A](queue: zio.Queue[Take[E1, A]])(implicit dummyImplicit: DummyImplicit): Publish[IO[E, +?], A] = PublishZQueueTake(queue)
} 
Example 23
Source File: ParallelNSyntax.scala    From cats-effect   with Apache License 2.0
package cats.effect.syntax

import cats.effect.Concurrent
import cats.effect.implicits._
import cats.{Monad, Parallel, Traverse}

trait ParallelNSyntax {
  implicit final def catsSyntaxParallelTraverseNConcurrent[T[_]: Traverse, A](
    ta: T[A]
  ): ParallelTraversableNConcurrentOps[T, A] =
    new ParallelTraversableNConcurrentOps[T, A](ta)

  implicit final def catsSyntaxParallelSequenceNConcurrent[T[_]: Traverse, M[_]: Monad, A](
    tma: T[M[A]]
  ): ParallelSequenceNConcurrentOps[T, M, A] = new ParallelSequenceNConcurrentOps[T, M, A](tma)
}

final class ParallelSequenceNConcurrentOps[T[_], M[_], A](private val tma: T[M[A]]) extends AnyVal {
  def parSequenceN(n: Long)(implicit M: Concurrent[M], T: Traverse[T], P: Parallel[M]): M[T[A]] =
    M.parSequenceN(n)(tma)
}

final class ParallelTraversableNConcurrentOps[T[_], A](private val ta: T[A]) extends AnyVal {
  def parTraverseN[M[_], B](n: Long)(f: A => M[B])(implicit M: Concurrent[M], T: Traverse[T], P: Parallel[M]): M[T[B]] =
    M.parTraverseN(n)(ta)(f)
} 
Example 24
Source File: Repeated.scala    From datadog4s   with MIT License
package com.avast.cloud.datadog4s.helpers

import java.time.Duration

import cats.effect.{ Concurrent, Resource, Timer }
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.apply._
import cats.syntax.applicative._

import scala.concurrent.duration._

object Repeated {

  
  def run[F[_]: Concurrent: Timer](
    delay: Duration,
    iterationTimeout: Duration,
    errorHandler: Throwable => F[Unit]
  )(task: F[Unit]): Resource[F, F[Unit]] = {
    val safeTask = Concurrent.timeout(task, toScala(iterationTimeout)).attempt.flatMap {
      case Right(a) => a.pure[F]
      case Left(e)  => errorHandler(e)
    }

    val snooze  = Timer[F].sleep(toScala(delay))
    val process = (safeTask *> snooze).foreverM[Unit]

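    // background runs `process` on a fiber for the Resource's lifetime and
    // cancels it on release; the yielded F[Unit] joins the fiber.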
    Concurrent[F].background(process)
  }

  private def toScala(duration: Duration): FiniteDuration =
    duration.toMillis.millis
} 
Example 25
Source File: RedisChannel.scala    From laserdisc   with MIT License
package laserdisc
package fs2

import java.net.InetSocketAddress

import _root_.fs2._
import _root_.fs2.io.tcp.{Socket, SocketGroup}
import cats.MonadError
import cats.effect.{Blocker, Concurrent, ContextShift, Resource}
import cats.syntax.flatMap._
import laserdisc.protocol._
import log.effect.LogWriter
import scodec.Codec
import scodec.stream.{StreamDecoder, StreamEncoder}

import scala.concurrent.duration.FiniteDuration

object RedisChannel {
  private[this] final val streamDecoder = StreamDecoder.many(Codec[RESP])
  private[this] final val streamEncoder = StreamEncoder.many(Codec[RESP])

  private[fs2] final def apply[F[_]: ContextShift: LogWriter: Concurrent](
      address: InetSocketAddress,
      writeTimeout: Option[FiniteDuration],
      readMaxBytes: Int
  )(blocker: Blocker): Pipe[F, RESP, RESP] = {
    def connectedSocket: Resource[F, Socket[F]] =
      SocketGroup(blocker, nonBlockingThreadCount = 4) >>= (_.client(address, noDelay = true))

    stream =>
      Stream.resource(connectedSocket) >>= { socket =>
        val send    = stream.through(impl.send(socket.writes(writeTimeout)))
        val receive = socket.reads(readMaxBytes).through(impl.receiveResp)

        send.drain
          .covaryOutput[RESP]
          .mergeHaltBoth(receive)
          .onFinalizeWeak(socket.endOfOutput)
      }
  }

  private[this] final object impl {
    def send[F[_]: MonadError[*[_], Throwable]](socketChannel: Pipe[F, Byte, Unit])(
        implicit log: LogWriter[F]
    ): Pipe[F, RESP, Unit] =
      _.evalTap(resp => log.trace(s"sending $resp"))
        .through(streamEncoder.encode[F])
        .flatMap(bits => Stream.chunk(Chunk.bytes(bits.toByteArray)))
        .through(socketChannel)

    def receiveResp[F[_]: MonadError[*[_], Throwable]](implicit log: LogWriter[F]): Pipe[F, Byte, RESP] = {
      def framing: Pipe[F, Byte, CompleteFrame] = {
        def loopScan(bytesIn: Stream[F, Byte], previous: RESPFrame): Pull[F, CompleteFrame, Unit] =
          bytesIn.pull.uncons.flatMap {
            case Some((chunk, rest)) =>
              previous.append(chunk.toByteBuffer) match {
                case Left(ex)                    => Pull.raiseError(ex)
                case Right(frame: CompleteFrame) => Pull.output1(frame) >> loopScan(rest, EmptyFrame)
                case Right(frame: MoreThanOneFrame) =>
                  Pull.output(Chunk.vector(frame.complete)) >> {
                    if (frame.remainder.isEmpty) loopScan(rest, EmptyFrame)
                    else loopScan(rest, IncompleteFrame(frame.remainder, 0L))
                  }
                case Right(frame: IncompleteFrame) => loopScan(rest, frame)
              }

            case _ => Pull.done
          }

        bytesIn => loopScan(bytesIn, EmptyFrame).stream
      }

      pipeIn =>
        streamDecoder
          .decode(pipeIn.through(framing) map (_.bits))
          .evalTap(resp => log.trace(s"receiving $resp"))
    }
  }
} 
Example 26
Source File: PromiseMapper.scala    From laserdisc   with MIT License
package laserdisc
package fs2

import cats.effect.concurrent.Deferred
import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, Timer}
import cats.syntax.flatMap._
import cats.syntax.monadError._
import shapeless.Poly1

import scala.concurrent.TimeoutException

object PromiseMapper extends Poly1 {
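  // Enqueue each protocol request with a fresh Deferred promise, then wait
  // (bounded by `duration`) for the connection loop to complete that promise.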
  private[this] final def mapper[F[_]: Concurrent: Timer, A](protocol: Protocol.Aux[A]): Env[F] => F[Maybe[A]] = {
    case (queue, duration) =>
      Deferred[F, Maybe[A]] >>= { promise =>
        queue.enqueue1(Request(protocol, promise.complete)) >> {
          promise.get
            .timeout(duration)
            .adaptError {
              case _: TimeoutException => RequestTimedOut(protocol)
            }
        }
      }
  }

  implicit def mkOne[F[_]: Timer: Concurrent, A]: Case.Aux[Protocol.Aux[A], Env[F] => F[Maybe[A]]] = at[Protocol.Aux[A]](mapper(_))
} 
Example 27
Source File: ProductIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{ProductErr, ProductPriceErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait ProductIntegration[F[_]] {
  def product: ProductId => F[Option[Product]]
  def productPrice: Product => UserPreferences => F[Price]
}

object ProductIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    productDep: TeamTwoHttpApi,
    pricesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): ProductIntegration[F] =
    new ProductIntegration[F] {
      def product: ProductId => F[Option[Product]] = { ps =>
        CS.shift >> productDep.product(ps).adaptedTo[F].timeout(t).narrowFailureTo[ProductErr]
      }

      def productPrice: Product => UserPreferences => F[Price] = { p => pref =>
        CS.shift >> pricesDep.productPrice(p)(pref).adaptedTo[F].timeout(t).narrowFailureTo[ProductPriceErr]
      }
    }
} 
Example 28
Source File: CacheIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{CacheLookupError, CacheStoreError}
import external.TeamThreeCacheApi
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.duration.FiniteDuration

sealed trait CacheIntegration[F[_]] {
  def cachedProduct: ProductId => F[Option[Product]]
  def storeProductToCache: ProductId => Product => F[Unit]
}

object CacheIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]](
    cache: TeamThreeCacheApi[ProductId, Product],
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): CacheIntegration[F] =
    new CacheIntegration[F] {
      def cachedProduct: ProductId => F[Option[Product]] =
        pId => CS.shift >> cache.get(pId).adaptedTo[F].timeout(t).narrowFailureTo[CacheLookupError]

      def storeProductToCache: ProductId => Product => F[Unit] =
        pId => p => CS.shift >> cache.put(pId)(p).adaptedTo[F].timeout(t).narrowFailureTo[CacheStoreError]
    }
} 
Example 29
Source File: UserIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{PreferenceErr, UserErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait UserIntegration[F[_]] {
  def user: UserId => F[User]
  def usersPreferences: UserId => F[UserPreferences]
}

object UserIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    userDep: TeamTwoHttpApi,
    preferencesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): UserIntegration[F] =
    new UserIntegration[F] {
      def user: UserId => F[User] = { id =>
        CS.shift >> userDep.user(id).adaptedTo[F].timeout(t).narrowFailureTo[UserErr]
      }

      def usersPreferences: UserId => F[UserPreferences] = { id =>
        CS.shift >> preferencesDep.usersPreferences(id).adaptedTo[F].timeout(t).narrowFailureTo[PreferenceErr]
      }
    }
} 
Example 30
Source File: PriceService.scala    From http4s-poc-api   with MIT License
package service

import cats.Parallel
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.apply._
import cats.syntax.flatMap._
import cats.syntax.parallel._
import external.library.IoAdapt.-->
import external.{TeamOneHttpApi, TeamThreeCacheApi, TeamTwoHttpApi}
import integration.{CacheIntegration, ProductIntegration, UserIntegration}
import log.effect.LogWriter
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration._

final case class PriceService[F[_]: Concurrent: Timer: ContextShift: Parallel[*[_]]](
  cacheDep: TeamThreeCacheApi[ProductId, Product],
  teamOneStupidName: TeamOneHttpApi,
  teamTwoStupidName: TeamTwoHttpApi,
  logger: LogWriter[F]
)(
  implicit ev1: IO --> F,
  ev2: Future --> F
) {
  private[this] val cache      = CacheIntegration[F](cacheDep, 10.seconds)
  private[this] val userInt    = UserIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds)
  private[this] val productInt = ProductIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds)

  private[this] lazy val productRepo: ProductRepo[F]             = ProductRepo(cache, productInt, logger)
  private[this] lazy val priceCalculator: PriceCalculator[F]     = PriceCalculator(productInt, logger)
  private[this] lazy val preferenceFetcher: PreferenceFetcher[F] = PreferenceFetcher(userInt, logger)

  
  def prices(userId: UserId, productIds: Seq[ProductId]): F[List[Price]] =
    (userFor(userId), productsFor(productIds), preferencesFor(userId))
      .parMapN(priceCalculator.finalPrices)
      .flatten

  private[this] def userFor(userId: UserId): F[User] =
    logger.debug(s"Collecting user details for id $userId") >>
      userInt.user(userId) <*
      logger.debug(s"User details collected for id $userId")

  private[this] def preferencesFor(userId: UserId): F[UserPreferences] =
    logger.debug(s"Looking up user preferences for user $userId") >>
      preferenceFetcher.userPreferences(userId) <*
      logger.debug(s"User preferences look up for $userId completed")

  private[this] def productsFor(productIds: Seq[ProductId]): F[List[Product]] =
    logger.debug(s"Collecting product details for products $productIds") >>
      productRepo.storedProducts(productIds) <*
      logger.debug(s"Product details collection for $productIds completed")
} 
Example 31
Source File: IoAdapt.scala    From http4s-poc-api   with MIT License
package external
package library

import cats.arrow.FunctionK
import cats.effect.{Concurrent, ContextShift, IO}
import external.library.IoAdapt.-->
import zio.{Task, ZIO}

import scala.concurrent.Future


sealed trait IoAdapt[F[_], G[_]] {
  def apply[A]: (=>F[A]) => G[A]

  def functionK: FunctionK[F, G] =
    λ[FunctionK[F, G]](apply(_))
}

private[library] sealed trait IoAdaptInstances {
  implicit def catsIoToZioTask(implicit cc: Concurrent[Task]): IO --> Task =
    new IoAdapt[IO, Task] {
      def apply[A]: (=>IO[A]) => Task[A] =
        io => cc.liftIO(io)
    }

  implicit val futureToZioTask: Future --> Task =
    new IoAdapt[Future, Task] {
      def apply[A]: (=>Future[A]) => Task[A] =
        ft => ZIO.fromFuture(ec => ft.map(identity)(ec))
    }

  implicit def futureToIo(implicit cs: ContextShift[IO]): Future --> IO =
    new IoAdapt[Future, IO] {
      def apply[A]: (=>Future[A]) => IO[A] =
        IO.fromFuture[A] _ compose IO.delay
    }
}

object IoAdapt extends IoAdaptInstances {
  type -->[F[_], G[_]] = IoAdapt[F, G]
} 
Example 32
Source File: CatsInteropSpec.scala    From interop-cats   with Apache License 2.0
package zio.interop

import cats.effect.{ Concurrent, Resource }
import org.specs2.Specification
import org.specs2.specification.AroundTimeout
import zio.{ Promise, Runtime, Task }
import zio.interop.catz._

class CatsInteropSpec extends Specification with AroundTimeout {

  def is = s2"""
      Resource
        cats fiber wrapped in Resource can be canceled $catsResourceInterruptible
      """

  def catsResourceInterruptible = {

    val io = for {
      p        <- Promise.make[Nothing, Int]
      resource = Resource.make(Concurrent[Task].start(p.succeed(1) *> Task.never))(_.cancel)
      _        <- resource.use(_ => p.await)
    } yield 0

    Runtime.default.unsafeRun(io) must be_===(0)
  }

} 
Example 33
Source File: InvoicesApi.scala    From event-sourcing-kafka-streams   with MIT License
package org.amitayh.invoices.web

import java.util.UUID

import cats.effect.{Concurrent, Timer}
import cats.implicits._
import fs2.Stream
import fs2.concurrent.Topic
import io.circe._
import io.circe.generic.auto._
import io.circe.syntax._
import org.amitayh.invoices.common.domain.CommandResult.{Failure, Success}
import org.amitayh.invoices.common.domain.{Command, CommandResult}
import org.amitayh.invoices.dao.InvoiceList
import org.amitayh.invoices.web.CommandDto._
import org.amitayh.invoices.web.PushEvents.CommandResultRecord
import org.http4s.circe._
import org.http4s.dsl.Http4sDsl
import org.http4s.{EntityDecoder, HttpRoutes, Response}

import scala.concurrent.duration._

class InvoicesApi[F[_]: Concurrent: Timer] extends Http4sDsl[F] {

  private val maxQueued = 16

  implicit val commandEntityDecoder: EntityDecoder[F, Command] = jsonOf[F, Command]

  def service(invoiceList: InvoiceList[F],
              producer: Kafka.Producer[F, UUID, Command],
              commandResultsTopic: Topic[F, CommandResultRecord]): HttpRoutes[F] = HttpRoutes.of[F] {
    case GET -> Root / "invoices" =>
      invoiceList.get.flatMap(invoices => Ok(invoices.asJson))

    case request @ POST -> Root / "execute" / "async" / UuidVar(invoiceId) =>
      request
        .as[Command]
        .flatMap(producer.send(invoiceId, _))
        .flatMap(metaData => Accepted(Json.fromLong(metaData.timestamp)))

    case request @ POST -> Root / "execute" / UuidVar(invoiceId) =>
      request.as[Command].flatMap { command =>
        val response = resultStream(commandResultsTopic, command.commandId) merge timeoutStream
        producer.send(invoiceId, command) *> response.head.compile.toList.map(_.head)
      }
  }

  private def resultStream(commandResultsTopic: Topic[F, CommandResultRecord],
                           commandId: UUID): Stream[F, Response[F]] =
    commandResultsTopic.subscribe(maxQueued).collectFirst {
      case Some((_, CommandResult(_, `commandId`, outcome))) => outcome
    }.flatMap {
      case Success(_, _, snapshot) => Stream.eval(Ok(snapshot.asJson))
      case Failure(cause) => Stream.eval(UnprocessableEntity(cause.message))
    }

  private def timeoutStream: Stream[F, Response[F]] =
    Stream.eval(Timer[F].sleep(5.seconds) *> RequestTimeout("timeout"))

}

object InvoicesApi {
  def apply[F[_]: Concurrent: Timer]: InvoicesApi[F] = new InvoicesApi[F]
} 
Example 34
Source File: KafkaAdminAlgebra.scala    From hydra   with Apache License 2.0
package hydra.kafka.algebras

import cats.effect.concurrent.Ref
import cats.effect.{Async, Concurrent, ContextShift, Resource, Sync}
import cats.implicits._
import fs2.kafka._
import hydra.core.protocol._
import hydra.kafka.util.KafkaUtils.TopicDetails
import org.apache.kafka.clients.admin.NewTopic
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException

import scala.util.control.NoStackTrace


trait KafkaAdminAlgebra[F[_]] {
  import KafkaAdminAlgebra._

  def describeTopic(name: TopicName): F[Option[Topic]]
  def getTopicNames: F[List[TopicName]]
  def createTopic(name: TopicName, details: TopicDetails): F[Unit]
  def deleteTopic(name: String): F[Unit]
}

object KafkaAdminAlgebra {

  type TopicName = String
  final case class Topic(name: TopicName, numberPartitions: Int)

  def live[F[_]: Sync: Concurrent: ContextShift](
      bootstrapServers: String,
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {

      override def describeTopic(name: TopicName): F[Option[Topic]] = {
        getAdminClientResource
          .use(_.describeTopics(name :: Nil))
          .map(_.headOption.map(_._2).map { td =>
            Topic(td.name(), td.partitions().size())
          })
          .recover {
            case _: UnknownTopicOrPartitionException => None
          }
      }

      override def getTopicNames: F[List[TopicName]] =
        getAdminClientResource.use(_.listTopics.names.map(_.toList))

      override def createTopic(name: TopicName, d: TopicDetails): F[Unit] = {
        import scala.collection.JavaConverters._
        val newTopic = new NewTopic(name, d.numPartitions, d.replicationFactor)
          .configs(d.configs.asJava)
        getAdminClientResource.use(_.createTopic(newTopic))
      }

      override def deleteTopic(name: String): F[Unit] =
        getAdminClientResource.use(_.deleteTopic(name))

      private def getAdminClientResource: Resource[F, KafkaAdminClient[F]] = {
        adminClientResource(
          AdminClientSettings.apply.withBootstrapServers(bootstrapServers)
        )
      }
    }
  }

  def test[F[_]: Sync]: F[KafkaAdminAlgebra[F]] =
    Ref[F].of(Map[TopicName, Topic]()).flatMap(getTestKafkaClient[F])

  private[this] def getTestKafkaClient[F[_]: Sync](
      ref: Ref[F, Map[TopicName, Topic]]
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {
      override def describeTopic(name: TopicName): F[Option[Topic]] =
        ref.get.map(_.get(name))

      override def getTopicNames: F[List[TopicName]] =
        ref.get.map(_.keys.toList)

      override def createTopic(
          name: TopicName,
          details: TopicDetails
      ): F[Unit] = {
        val entry = name -> Topic(name, details.numPartitions)
        ref.update(old => old + entry)
      }

      override def deleteTopic(name: String): F[Unit] =
        ref.update(_ - name)
    }
  }

} 
Example 35
Source File: auth.scala    From actors-cats-effect-fs2   with Apache License 2.0
package app

import app.actors._
import app.syntax._
import cats.Functor
import cats.effect.{Clock, Concurrent}
import cats.syntax.applicative._
import cats.syntax.flatMap._
import cats.syntax.functor._

object auth {
  def requestNewAuthToken[F[_]](
    implicit F: Functor[F],
    clock: Clock[F]
  ): F[AuthToken] =
    clock.now.map(now => AuthToken(now.plusSeconds(3600), "token"))

  def requestActiveAuthToken[F[_]](
    implicit F: Concurrent[F],
    clock: Clock[F]
  ): F[F[AuthToken]] =
    actor[F, Option[AuthToken], AuthToken](
      initialState = None,
      receive = ref =>
        for {
          existingAuthToken <- ref.get
          now <- clock.now
          authToken <- existingAuthToken
            .filter(_.isActive(now))
            .map(_.pure[F])
            .getOrElse {
              for {
                newAuthToken <- requestNewAuthToken
                _ <- ref.set(Some(newAuthToken))
              } yield newAuthToken
            }
        } yield authToken
    )
} 
Example 36
Source File: actors.scala    From actors-cats-effect-fs2   with Apache License 2.0
package app

import app.syntax._
import cats.effect.Concurrent
import cats.effect.concurrent.{Deferred, Ref}
import cats.effect.syntax.concurrent._
import cats.syntax.flatMap._
import cats.syntax.functor._
import fs2.concurrent.Queue

object actors {
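  // A minimal actor: state lives in a Ref, each ask enqueues a Deferred that a
  // single background fiber completes, so `receive` handles one message at a time.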
  def actor[F[_], S, O](
    initialState: S,
    receive: Ref[F, S] => F[O]
  )(implicit F: Concurrent[F]): F[F[O]] =
    for {
      ref <- Ref.of[F, S](initialState)
      queue <- Queue.unbounded[F, Deferred[F, O]]
      fiber <- (for {
        deferred <- queue.dequeue1
        output <- receive(ref)
        _ <- deferred.complete(output)
      } yield ()).foreverM.void.start
      ask = for {
        deferred <- Deferred[F, O]
        _ <- queue.offer1(deferred)
        output <- (fiber.join race deferred.get)
          .collect { case Right(o) => o }
      } yield output
    } yield ask

  def actorWithInput[F[_], S, I, O](
    initialState: S,
    receive: (I, Ref[F, S]) => F[O]
  )(implicit F: Concurrent[F]): F[I => F[O]] =
    for {
      ref <- Ref.of[F, S](initialState)
      queue <- Queue.unbounded[F, (I, Deferred[F, O])]
      fiber <- (for {
        inputAndDeferred <- queue.dequeue1
        (input, deferred) = inputAndDeferred
        output <- receive(input, ref)
        _ <- deferred.complete(output)
      } yield ()).foreverM.void.start
      ask = (input: I) =>
        for {
          deferred <- Deferred[F, O]
          _ <- queue.offer1((input, deferred))
          output <- (fiber.join race deferred.get)
            .collect { case Right(o) => o }
        } yield output
    } yield ask
} 
Example 37
Source File: ProcessAlg.scala    From scala-steward   with Apache License 2.0
package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, Concurrent, ContextShift, Timer}
import cats.implicits._
import io.chrisdavenport.log4cats.Logger
import org.scalasteward.core.application.Cli.EnvVar
import org.scalasteward.core.application.Config
import org.scalasteward.core.util.Nel

trait ProcessAlg[F[_]] {
  def exec(command: Nel[String], cwd: File, extraEnv: (String, String)*): F[List[String]]

  def execSandboxed(command: Nel[String], cwd: File): F[List[String]]
}

object ProcessAlg {
  abstract class UsingFirejail[F[_]](config: Config) extends ProcessAlg[F] {
    override def execSandboxed(command: Nel[String], cwd: File): F[List[String]] = {
      val envVars = config.envVars.map(EnvVar.unapply(_).get)
      if (config.disableSandbox)
        exec(command, cwd, envVars: _*)
      else {
        val whitelisted = (cwd.pathAsString :: config.whitelistedDirectories)
          .map(dir => s"--whitelist=$dir")
        val readOnly = config.readOnlyDirectories
          .map(dir => s"--read-only=$dir")
        exec(Nel("firejail", whitelisted ++ readOnly) ::: command, cwd, envVars: _*)
      }
    }
  }

  def create[F[_]](blocker: Blocker)(implicit
      config: Config,
      contextShift: ContextShift[F],
      logger: Logger[F],
      timer: Timer[F],
      F: Concurrent[F]
  ): ProcessAlg[F] =
    new UsingFirejail[F](config) {
      override def exec(
          command: Nel[String],
          cwd: File,
          extraEnv: (String, String)*
      ): F[List[String]] =
        logger.debug(s"Execute ${command.mkString_(" ")}") >>
          process.slurp[F](
            command,
            Some(cwd.toJava),
            extraEnv.toMap,
            config.processTimeout,
            logger.trace(_),
            blocker
          )
    }
} 
Example 38
Source File: package.scala    From fs2-blobstore   with Apache License 2.0
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{Blocker, Concurrent, ContextShift, Resource, Sync}
import fs2.{Chunk, Hotswap, Pipe, Pull, RaiseThrowable, Stream}
import cats.implicits._

package object blobstore {
  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F],
    CS: ContextShift[F]
  ): Pull[F, Nothing, Unit] = {
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) =>
        Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }
  }

  protected[blobstore] def bufferToDisk[F[_]](
    chunkSize: Int,
    blocker: Blocker
  )(implicit F: Sync[F], CS: ContextShift[F]): Pipe[F, Byte, (Long, Stream[F, Byte])] = { in =>
    Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(p => F.delay(p.toFile.delete).void).flatMap {
      p =>
        in.through(fs2.io.file.writeAll(p, blocker)).drain ++
          Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

  private[blobstore] def putRotateBase[F[_]: Concurrent, T](
    limit: Long,
    openNewFile: Resource[F, T]
  )(consume: T => Chunk[Byte] => F[Unit]): Pipe[F, Byte, Unit] = { in =>
    Stream
      .resource(Hotswap(openNewFile))
      .flatMap {
        case (hotswap, newFile) =>
          goRotate(limit, 0L, in, newFile, hotswap, openNewFile)(
            consume = consumer => bytes => Pull.eval(consume(consumer)(bytes)).as(consumer),
            extract = Stream.emit
          ).stream
      }
  }

  private[blobstore] def goRotate[F[_]: RaiseThrowable, A, B](
    limit: Long,
    acc: Long,
    s: Stream[F, Byte],
    consumer: B,
    hotswap: Hotswap[F, A],
    resource: Resource[F, A]
  )(
    consume: B => Chunk[Byte] => Pull[F, Unit, B],
    extract: A => Stream[F, B]
  ): Pull[F, Unit, Unit] = {
    val toWrite = (limit - acc).min(Int.MaxValue.toLong).toInt
    s.pull.unconsLimit(toWrite).flatMap {
      case Some((hd, tl)) =>
        val newAcc = acc + hd.size
        consume(consumer)(hd).flatMap { consumer =>
          if (newAcc >= limit) {
            Pull
              .eval(hotswap.swap(resource))
              .flatMap(a => extract(a).pull.headOrError)
              .flatMap(nc => goRotate(limit, 0L, tl, nc, hotswap, resource)(consume, extract))
          } else {
            goRotate(limit, newAcc, tl, consumer, hotswap, resource)(consume, extract)
          }
        }
      case None => Pull.done
    }
  }

} 
Example 39
Source File: util.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore

import java.util.concurrent.{CancellationException, CompletableFuture, CompletionException}
import cats.effect.Concurrent
import cats.syntax.flatMap._
import cats.syntax.functor._

object util {
  def liftJavaFuture[F[_], A](fa: F[CompletableFuture[A]])(implicit F: Concurrent[F]): F[A] = fa.flatMap { cf =>
    F.cancelable { cb =>
      cf.handle[Unit]((result: A, err: Throwable) =>
        Option(err) match {
          case None =>
            cb(Right(result))
          case Some(_: CancellationException) =>
            ()
          case Some(ex: CompletionException) =>
            cb(Left(Option(ex.getCause).getOrElse(ex)))
          case Some(ex) =>
            cb(Left(ex))
        }
      )
      F.delay(cf.cancel(true)).void
    }
  }
} 
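Usage note: the helper takes `F[CompletableFuture[A]]` so that future creation stays lazy, and cancelling the resulting effect cancels the underlying Java future. A minimal sketch, with the usual cats-effect 2 `IO` wiring assumed:

import java.util.concurrent.CompletableFuture

import blobstore.util
import cats.effect.{ContextShift, IO}

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// Defer future creation inside IO so nothing runs until the effect does.
val lifted: IO[Int] = util.liftJavaFuture(IO(CompletableFuture.supplyAsync(() => 21 * 2)))

// lifted.unsafeRunSync() == 42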
Example 40
Source File: Broadcast.scala    From canoe   with MIT License 5 votes vote down vote up
package canoe.api

import cats.syntax.all._
import cats.instances.list._
import cats.effect.Concurrent
import cats.effect.concurrent.Ref
import fs2.{Pipe, Stream}
import fs2.concurrent.{Queue, Topic}


private[api] class Broadcast[F[_], A](subs: Ref[F, List[Queue[F, A]]])(implicit C: Concurrent[F]) extends Topic[F, A] {
  def publish: Pipe[F, A, Unit] =
    _.evalMap(publish1)

  def publish1(a: A): F[Unit] =
    subs.get.flatMap(_.traverse_(_.enqueue1(a)))

  def subscribe(maxQueued: Int): Stream[F, A] =
    subscription(maxQueued).evalTap(q => subs.update(q :: _)).flatMap(_.dequeue)

  private def subscription(maxQueued: Int): Stream[F, Queue[F, A]] =
    Stream.bracket(Queue.bounded[F, A](maxQueued)) { q =>
      subs.update(_.filter(_ ne q)) *> q.tryDequeue1.void
    }

  def subscribeSize(maxQueued: Int): Stream[F, (A, Int)] =
    subscribe(maxQueued).zip(subscribers)

  def subscribers: Stream[F, Int] =
    Stream.repeatEval(subs.get).map(_.size)
}

object Broadcast {
  private[api] def apply[F[_], A](implicit C: Concurrent[F]): F[Broadcast[F, A]] =
    Ref.of[F, List[Queue[F, A]]](List.empty).map(new Broadcast(_))
} 
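Usage note: each subscriber owns a bounded queue, so a slow consumer backpressures only itself. A sketch of publishing through the topic; since `Broadcast` is `private[api]`, this only compiles inside canoe's `api` package, and the stream shapes are illustrative:

import cats.effect.{ContextShift, IO}
import fs2.Stream

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val seen: IO[List[Int]] =
  Stream
    .eval(Broadcast[IO, Int])
    .flatMap { topic =>
      // subscribers only see what is published after they register
      topic
        .subscribe(maxQueued = 8)
        .take(3)
        .concurrently(Stream.repeatEval(IO(42)).through(topic.publish))
    }
    .compile
    .toList

// seen.unsafeRunSync() == List(42, 42, 42)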
Example 41
Source File: SemaphoreBenchmark.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.stm

import java.util.concurrent.TimeUnit

import scala.concurrent.ExecutionContext

import cats.effect.{ ContextShift, IO => CIO }
import org.openjdk.jmh.annotations._

import zio.IOBenchmarks._
import zio._

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Measurement(iterations = 15, timeUnit = TimeUnit.SECONDS, time = 10)
@Warmup(iterations = 15, timeUnit = TimeUnit.SECONDS, time = 10)
@Fork(1)
class SemaphoreBenchmark {
  @Param(Array("10"))
  var fibers: Int = _

  @Param(Array("1000"))
  var ops: Int = _

  @Benchmark
  def semaphoreContention() =
    unsafeRun(for {
      sem   <- Semaphore.make(fibers / 2L)
      fiber <- ZIO.forkAll(List.fill(fibers)(repeat(ops)(sem.withPermit(ZIO.succeedNow(1)))))
      _     <- fiber.join
    } yield ())

  @Benchmark
  def tsemaphoreContention() =
    unsafeRun(for {
      sem   <- TSemaphore.make(fibers / 2L).commit
      fiber <- ZIO.forkAll(List.fill(fibers)(repeat(ops)(sem.withPermit(STM.succeedNow(1)).commit)))
      _     <- fiber.join
    } yield ())

  @Benchmark
  def semaphoreCatsContention() = {
    import cats.effect.Concurrent
    import cats.effect.concurrent.Semaphore
    implicit val contextShift: ContextShift[CIO] = CIO.contextShift(ExecutionContext.global)

    (for {
      sem   <- Semaphore(fibers / 2L)(Concurrent[CIO])
      fiber <- catsForkAll(List.fill(fibers)(catsRepeat(ops)(sem.withPermit(CIO(1)))))
      _     <- fiber.join
    } yield ()).unsafeRunSync()
  }
} 
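The cats benchmark boils down to this contention pattern: with `fibers / 2` permits, half of the workers are always parked waiting for `withPermit`. The same shape as a standalone sketch:

import cats.effect.concurrent.Semaphore
import cats.effect.{ContextShift, IO}
import cats.implicits._

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// Ten workers contend for five permits; at most five bodies run at once.
val contended: IO[Unit] =
  Semaphore[IO](5).flatMap { sem =>
    (1 to 10).toList.parTraverse_(_ => sem.withPermit(IO.unit))
  }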
Example 42
Source File: MetadataAlgebraSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.algebras

import java.time.Instant

import cats.data.NonEmptyList
import cats.effect.{Concurrent, ContextShift, IO, Sync, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.core.marshallers.History
import hydra.kafka.algebras.MetadataAlgebra.TopicMetadataContainer
import hydra.kafka.model.ContactMethod.Slack
import hydra.kafka.model.TopicMetadataV2Request.Subject
import hydra.kafka.model.{Public, StreamTypeV2, TopicMetadataV2, TopicMetadataV2Key, TopicMetadataV2Request, TopicMetadataV2Value}
import io.chrisdavenport.log4cats.SelfAwareStructuredLogger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.apache.avro.generic.GenericRecord
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import retry.RetryPolicies._
import retry.syntax.all._
import retry.{RetryPolicy, _}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class MetadataAlgebraSpec extends AnyWordSpecLike with Matchers {

  implicit private val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect

  private implicit val policy: RetryPolicy[IO] = limitRetries[IO](5) |+| exponentialBackoff[IO](500.milliseconds)
  private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
  private implicit def noop[A]: (A, RetryDetails) => IO[Unit] = retry.noop[IO, A]

  implicit private def unsafeLogger[F[_]: Sync]: SelfAwareStructuredLogger[F] =
    Slf4jLogger.getLogger[F]

  private implicit class RetryAndAssert[A](boolIO: IO[A]) {
    def retryIfFalse(check: A => Boolean): IO[Assertion] =
      boolIO.map(check).retryingM(identity, policy, noop).map(assert(_))
  }


  private val metadataTopicName = "_internal.metadataTopic"
  private val consumerGroup = "Consumer Group"

  (for {
    kafkaClient <- KafkaClientAlgebra.test[IO]
    schemaRegistry <- SchemaRegistry.test[IO]
    metadata <- MetadataAlgebra.make(metadataTopicName, consumerGroup, kafkaClient, schemaRegistry, consumeMetadataEnabled = true)
  } yield {
    runTests(metadata, kafkaClient)
  }).unsafeRunSync()

  private def runTests(metadataAlgebra: MetadataAlgebra[IO], kafkaClientAlgebra: KafkaClientAlgebra[IO]): Unit = {
    "MetadataAlgebraSpec" should {

      "retrieve none for non-existant topic" in {
        val subject = Subject.createValidated("Non-existantTopic").get
        metadataAlgebra.getMetadataFor(subject).unsafeRunSync() shouldBe None
      }

      "retrieve metadata" in {
        val subject = Subject.createValidated("subject1").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)

        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          metadata <- metadataAlgebra.getMetadataFor(subject)
        } yield metadata shouldBe Some(TopicMetadataContainer(key, value, None, None))).unsafeRunSync()
      }

      "retrieve all metadata" in {
        val subject = Subject.createValidated("subject2").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)
        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          allMetadata <- metadataAlgebra.getAllMetadata
        } yield allMetadata should have length 2).unsafeRunSync()
      }
    }
  }

  private def getMetadataGenericRecords(subject: Subject): (IO[(GenericRecord, Option[GenericRecord])], TopicMetadataV2Key, TopicMetadataV2Value) = {
    val key = TopicMetadataV2Key(subject)
    val value = TopicMetadataV2Value(
        StreamTypeV2.Entity,
        deprecated = false,
        Public,
        NonEmptyList.one(Slack.create("#channel").get),
        Instant.now,
        List(),
        None)
    (TopicMetadataV2.encode[IO](key, Some(value)), key, value)
  }
} 
Example 43
Source File: CassandraSync.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.arrow.FunctionK
import cats.effect.concurrent.Semaphore
import cats.effect.{Concurrent, Sync, Timer}
import cats.implicits._
import cats.~>
import com.evolutiongaming.cassandra
import com.evolutiongaming.cassandra.sync.AutoCreate
import com.evolutiongaming.kafka.journal.Origin

trait CassandraSync[F[_]] {
  def apply[A](fa: F[A]): F[A]
}

object CassandraSync {

  def empty[F[_]]: CassandraSync[F] = new CassandraSync[F] {
    def apply[A](fa: F[A]) = fa
  }


  def apply[F[_]](implicit F: CassandraSync[F]): CassandraSync[F] = F


  def apply[F[_] : Sync : Timer : CassandraSession](
    config: SchemaConfig,
    origin: Option[Origin],
  ): CassandraSync[F] = {

    val keyspace = config.keyspace
    val autoCreate = if (keyspace.autoCreate) AutoCreate.Table else AutoCreate.None
    apply(
      keyspace = keyspace.name,
      table = config.locksTable,
      autoCreate = autoCreate,
      metadata = origin.map(_.value))
  }

  def apply[F[_] : Sync : Timer : CassandraSession](
    keyspace: String,
    table: String,
    autoCreate: AutoCreate,
    metadata: Option[String],
  ): CassandraSync[F] = {

    new CassandraSync[F] {

      def apply[A](fa: F[A]) = {

        val cassandraSync = cassandra.sync.CassandraSync.of[F](
          session = CassandraSession[F].unsafe,
          keyspace = keyspace,
          table = table,
          autoCreate = autoCreate)

        for {
          cassandraSync <- cassandraSync
          result        <- cassandraSync(id = "kafka-journal", metadata = metadata)(fa)
        } yield result
      }
    }
  }

  def of[F[_] : Concurrent : Timer : CassandraSession](
    config: SchemaConfig,
    origin: Option[Origin]
  ): F[CassandraSync[F]] = {

    for {
      semaphore <- Semaphore[F](1)
    } yield {
      val cassandraSync = apply[F](config, origin)
      val serial = new (F ~> F) {
        def apply[A](fa: F[A]) = semaphore.withPermit(fa)
      }
      cassandraSync.mapK(serial, FunctionK.id)
    }
  }


  implicit class CassandraSyncOps[F[_]](val self: CassandraSync[F]) extends AnyVal {

    def mapK[G[_]](fg: F ~> G, gf: G ~> F): CassandraSync[G] = new CassandraSync[G] {

      def apply[A](fa: G[A]) = fg(self(gf(fa)))
    }
  }
} 
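Design note: `of` adds no locking inside Cassandra itself; it wraps the `CassandraSync` in a process-local mutex built from `Semaphore(1)` and `mapK`, so callers in the same JVM queue up before touching the shared locks table. The core trick in isolation, sketched:

import cats.effect.concurrent.Semaphore
import cats.effect.{ContextShift, IO}
import cats.~>

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// A single permit turns the natural transformation into a mutual-exclusion wrapper.
val mkSerial: IO[IO ~> IO] =
  Semaphore[IO](1).map { sem =>
    new (IO ~> IO) {
      def apply[A](fa: IO[A]): IO[A] = sem.withPermit(fa)
    }
  }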
Example 44
Source File: IngestionFlowSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import cats.effect.{Concurrent, ContextShift, IO}
import hydra.avro.registry.SchemaRegistry
import hydra.core.ingest.HydraRequest
import hydra.core.ingest.RequestParams.{HYDRA_KAFKA_TOPIC_PARAM, HYDRA_RECORD_KEY_PARAM}
import hydra.ingest.services.IngestionFlow.MissingTopicNameException
import hydra.kafka.algebras.KafkaClientAlgebra
import org.apache.avro.{Schema, SchemaBuilder}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class IngestionFlowSpec extends AnyFlatSpec with Matchers {

  private implicit val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect
  private implicit val mode: scalacache.Mode[IO] = scalacache.CatsEffect.modes.async

  private val testSubject: String = "test_subject"

  private val testSubjectNoKey: String = "test_subject_no_key"

  private val testKey: String = "test"

  private val testPayload: String =
    s"""{"id": "$testKey", "testField": true}"""

  private val testSchema: Schema = SchemaBuilder.record("TestRecord")
    .prop("hydra.key", "id")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()

  private val testSchemaNoKey: Schema = SchemaBuilder.record("TestRecordNoKey")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()

  private def ingest(request: HydraRequest): IO[KafkaClientAlgebra[IO]] = for {
    schemaRegistry <- SchemaRegistry.test[IO]
    _ <- schemaRegistry.registerSchema(testSubject + "-value", testSchema)
    _ <- schemaRegistry.registerSchema(testSubjectNoKey + "-value", testSchemaNoKey)
    kafkaClient <- KafkaClientAlgebra.test[IO]
    ingestFlow <- IO(new IngestionFlow[IO](schemaRegistry, kafkaClient, "https://schemaRegistry.notreal"))
    _ <- ingestFlow.ingest(request)
  } yield kafkaClient

  it should "ingest a message" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(testKey), testPayload)
      }
    }.unsafeRunSync()
  }

  it should "ingest a message with a null key" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubjectNoKey))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubjectNoKey, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (None, testPayload)
      }
    }.unsafeRunSync()
  }

  it should "return an error when no topic name is provided" in {
    val testRequest = HydraRequest("correlationId", testPayload)
    ingest(testRequest).attempt.unsafeRunSync() shouldBe Left(MissingTopicNameException(testRequest))
  }

  it should "take the key from the header if present" in {
    val headerKey = "someDifferentKey"
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_RECORD_KEY_PARAM -> headerKey, HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(headerKey), testPayload)
      }
    }.unsafeRunSync()

  }

} 
Example 45
Source File: HealthCheck.scala    From sup   with Apache License 2.0 5 votes vote down vote up
package sup

import cats.data.{EitherK, Tuple2K}
import cats.effect.Concurrent
import cats.{~>, Applicative, ApplicativeError, Apply, Eq, Functor, Id, Monoid, NonEmptyParallel}
import cats.implicits._
import cats.effect.implicits._
import cats.tagless.FunctorK
import cats.tagless.implicits._

// NOTE: this excerpt omits the HealthCheck type itself and the earlier companion
// members referenced below (e.g. the liftF and const constructors).
object HealthCheck {
  def race[F[_]: Concurrent, H[_], I[_]](a: HealthCheck[F, H], b: HealthCheck[F, I]): HealthCheck[F, EitherK[H, I, ?]] =
    liftF {
      a.check.race(b.check).map(e => HealthResult(EitherK(e.bimap(_.value, _.value))))
    }

  implicit def functorK[F[_]: Functor]: FunctorK[HealthCheck[F, ?[_]]] = new FunctorK[HealthCheck[F, ?[_]]] {
    override def mapK[G[_], H[_]](fgh: HealthCheck[F, G])(gh: G ~> H): HealthCheck[F, H] = fgh.mapK(gh)
  }

  implicit def checkMonoid[F[_]: Applicative, H[_]: Applicative](
    implicit M: Monoid[Health]
  ): Monoid[HealthCheck[F, H]] =
    new Monoid[HealthCheck[F, H]] {
      override val empty: HealthCheck[F, H] = HealthCheck.const[F, H](M.empty)

      override def combine(x: HealthCheck[F, H], y: HealthCheck[F, H]): HealthCheck[F, H] = liftF {
        Applicative.monoid[F, HealthResult[H]].combine(x.check, y.check)
      }
    }

  implicit def healthCheckEq[F[_], H[_]](implicit F: Eq[F[HealthResult[H]]]): Eq[HealthCheck[F, H]] = Eq.by(_.check)
} 
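The `race` combinator keeps whichever check finishes first and cancels the loser; under the `EitherK` wrapper that is plain `Concurrent` racing, sketched here with `IO`:

import cats.effect.{ContextShift, IO, Timer}
import cats.implicits._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val t: Timer[IO]         = IO.timer(ExecutionContext.global)

val fast = IO.sleep(10.millis).as("fast dependency ok")
val slow = IO.sleep(10.seconds).as("slow dependency ok")

// Yields Left("fast dependency ok") and cancels the slow check.
val first: IO[Either[String, String]] = IO.race(fast, slow)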
Example 46
Source File: MemoryQueue.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.core.queue.memory

import cats.effect.Concurrent
import cats.implicits._
import fs2.Pipe
import fs2.concurrent.{Queue => Fs2Queue}
import jbok.core.queue.Queue

final class MemoryQueue[F[_], K, V](queue: Fs2Queue[F, (K, V)]) extends Queue[F, K, V] {
  override def produce(key: K, value: V): F[Unit] =
    queue.enqueue1(key -> value)

  override def sink: Pipe[F, (K, V), Unit] =
    queue.enqueue

  override def consume: fs2.Stream[F, (K, V)] =
    queue.dequeue
}

object MemoryQueue {
  def apply[F[_], K, V](implicit F: Concurrent[F]): F[Queue[F, K, V]] =
    Fs2Queue.circularBuffer[F, (K, V)](1000000).map(queue => new MemoryQueue(queue))
} 
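A round-trip sketch of the queue; jbok's `Queue` interface from the import above is assumed on the classpath:

import cats.effect.{ContextShift, IO}

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val roundTrip: IO[List[(String, Int)]] = for {
  queue <- MemoryQueue[IO, String, Int]
  _     <- queue.produce("answer", 42)
  seen  <- queue.consume.take(1).compile.toList
} yield seen

// roundTrip.unsafeRunSync() == List(("answer", 42))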
Example 47
Source File: Peer.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.core.peer

import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, Sync}
import cats.implicits._
import fs2.concurrent.Queue
import jbok.core.messages.{SignedTransactions, Status}
import jbok.network.Message
import scodec.bits.ByteVector
import jbok.codec.rlp.implicits._
import jbok.common.log.Logger
import jbok.common.math.N
import jbok.crypto._

final case class Peer[F[_]](
    uri: PeerUri,
    queue: Queue[F, Message[F]],
    status: Ref[F, Status],
    knownBlocks: Ref[F, Set[ByteVector]],
    knownTxs: Ref[F, Set[ByteVector]]
)(implicit F: Sync[F]) {
  import Peer._

  private[this] val log = Logger[F]

  def hasBlock(blockHash: ByteVector): F[Boolean] =
    knownBlocks.get.map(_.contains(blockHash))

  def hasTxs(stxs: SignedTransactions): F[Boolean] =
    knownTxs.get.map(_.contains(stxs.encoded.bytes.kec256))

  def markBlock(blockHash: ByteVector, number: N): F[Unit] =
    knownBlocks.update(s => s.take(MaxKnownBlocks - 1) + blockHash) >>
      status.update(s => s.copy(bestNumber = s.bestNumber.max(number)))

  def markTxs(stxs: SignedTransactions): F[Unit] =
    knownTxs.update(known => known.take(MaxKnownTxs - 1) + stxs.encoded.bytes.kec256)

  def markStatus(newStatus: Status): F[Unit] =
    status.update(s => if (newStatus.td > s.td) s.copy(bestNumber = newStatus.bestNumber, td = newStatus.td) else s)
}

object Peer {
  val MaxKnownTxs    = 32768
  val MaxKnownBlocks = 1024

  def apply[F[_]: Concurrent](uri: PeerUri, status: Status): F[Peer[F]] =
    for {
      queue       <- Queue.circularBuffer[F, Message[F]](100000)
      status      <- Ref.of[F, Status](status)
      knownBlocks <- Ref.of[F, Set[ByteVector]](Set.empty)
      knownTxs    <- Ref.of[F, Set[ByteVector]](Set.empty)
    } yield Peer[F](uri, queue, status, knownBlocks, knownTxs)
} 
Example 48
Source File: PeerStore.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.core.peer

import cats.effect.Concurrent
import cats.implicits._
import fs2._
import fs2.concurrent.Queue
import jbok.core.store.ColumnFamilies
import jbok.persistent.{KVStore, SingleColumnKVStore}

final class PeerStore[F[_]](store: SingleColumnKVStore[F, String, PeerUri], queue: Queue[F, PeerUri])(implicit F: Concurrent[F]) {
  def get(uri: String): F[Option[PeerUri]] =
    store.get(uri)

  def put(uri: PeerUri): F[Unit] =
    store.put(uri.uri, uri) >> queue.enqueue1(uri)

  def add(uris: PeerUri*): F[Unit] =
    uris.toList.traverse_(put)

  def del(uri: String): F[Unit] =
    store.del(uri)

  def getAll: F[List[PeerUri]] =
    store.toMap.map(_.values.toList)

  def subscribe: Stream[F, PeerUri] =
    queue.dequeue
}

object PeerStore {
  def apply[F[_]](db: KVStore[F])(implicit F: Concurrent[F]): F[PeerStore[F]] =
    for {
      queue <- Queue.circularBuffer[F, PeerUri](1000)
      store = SingleColumnKVStore[F, String, PeerUri](ColumnFamilies.Peer, db)
    } yield new PeerStore[F](store, queue)
} 
Example 49
Source File: PeerMessageHandler.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.core.peer

import cats.effect.Concurrent
import cats.effect.concurrent.Ref
import cats.implicits._
import fs2._
import jbok.codec.rlp.implicits._
import jbok.common.log.Logger
import jbok.core.NodeStatus
import jbok.core.messages.{BlockHash, NewBlock, NewBlockHashes, SignedTransactions, Status}
import jbok.core.models.Block
import jbok.core.peer.PeerSelector.PeerSelector
import jbok.core.queue.{Consumer, Producer}
import jbok.network.Request

class PeerMessageHandler[F[_]](
    txInbound: Producer[F, Peer[F], SignedTransactions],
    txOutbound: Consumer[F, PeerSelector[F], SignedTransactions],
    blockInbound: Producer[F, Peer[F], Block],
    blockOutbound: Consumer[F, PeerSelector[F], Block],
    statusInbound: Producer[F, Peer[F], Status],
    statusOutbound: Consumer[F, PeerSelector[F], Status],
    peerManager: PeerManager[F],
    status: Ref[F, NodeStatus]
)(implicit F: Concurrent[F]) {
  private[this] val log = Logger[F]

  def onNewBlockHashes(peer: Peer[F], hashes: List[BlockHash]): F[Unit] =
    hashes.traverse_(hash => peer.markBlock(hash.hash, hash.number))

  def onNewBlock(peer: Peer[F], block: Block): F[Unit] =
    status.get.flatMap {
      case NodeStatus.Done => blockInbound.produce(peer, block)
      case _               => F.unit
    }

  def onSignedTransactions(peer: Peer[F], stxs: SignedTransactions): F[Unit] =
    txInbound.produce(peer, stxs)

  def onStatus(peer: Peer[F], remoteStatus: Status): F[Unit] =
    for {
      localStatus <- peerManager.outgoing.localStatus
      _ <- if (!localStatus.isCompatible(remoteStatus)) {
        F.raiseError(Incompatible(localStatus, remoteStatus))
      } else {
        peer.markStatus(remoteStatus)
      }
    } yield ()

  val consume: Stream[F, Unit] =
    peerManager.inbound.evalMap {
      case (peer, req @ Request(_, NewBlockHashes.name, _, _)) =>
        for {
          hashes <- req.as[NewBlockHashes].map(_.hashes)
          _      <- onNewBlockHashes(peer, hashes)
        } yield ()

      case (peer, req @ Request(_, NewBlock.name, _, _)) =>
        for {
          block <- req.as[NewBlock].map(_.block)
          _     <- onNewBlock(peer, block)
        } yield ()

      case (peer, req @ Request(_, SignedTransactions.name, _, _)) =>
        for {
          stxs <- req.as[SignedTransactions]
          _    <- onSignedTransactions(peer, stxs)
        } yield ()

      case (peer, req @ Request(_, Status.name, _, _)) =>
        for {
          status <- req.as[Status]
          _ <- onStatus(peer, status)
        } yield ()

      case _ => F.unit
    }

  val produce: Stream[F, Unit] = {
    Stream(
      blockOutbound.consume.map { case (selector, block) => selector -> Request.binary[F, NewBlock](NewBlock.name, NewBlock(block).encoded) },
      txOutbound.consume.map { case (selector, tx) => selector -> Request.binary[F, SignedTransactions](SignedTransactions.name, tx.encoded) },
      statusOutbound.consume.map { case (selector, st) => selector -> Request.binary[F, Status](Status.name, st.encoded) }
    ).parJoinUnbounded
      .through(peerManager.outbound)
  }

  val stream: Stream[F, Unit] =
    Stream.eval_(log.i(s"starting Core/PeerMessageHandler")) ++
      consume merge produce
} 
Example 50
Source File: BlockService.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.app.service

import cats.effect.Concurrent
import cats.implicits._
import jbok.common.math.N
import jbok.core.config.SyncConfig
import jbok.core.ledger.History
import jbok.core.models.{Block, BlockBody, BlockHeader}
import jbok.core.api.BlockAPI
import jbok.core.messages.Status
import scodec.bits.ByteVector
import spire.compat._

final class BlockService[F[_]](history: History[F], config: SyncConfig)(implicit F: Concurrent[F]) extends BlockAPI[F] {
  override def getStatus: F[Status] =
    for {
      genesis <- history.genesisHeader
      number  <- history.getBestBlockNumber
      td      <- history.getTotalDifficultyByNumber(number).map(_.getOrElse(N(0)))
    } yield Status(history.chainId, genesis.hash, number, td, "")

  override def getBestBlockNumber: F[N] =
    history.getBestBlockNumber

  override def getBlockHeaderByNumber(number: N): F[Option[BlockHeader]] =
    history.getBlockHeaderByNumber(number)

  override def getBlockHeadersByNumber(start: N, limit: Int): F[List[BlockHeader]] = {
    val headersCount = math.min(limit, config.maxBlockHeadersPerRequest)
    val range        = List.range(start, start + headersCount)
    range.traverse(history.getBlockHeaderByNumber).map(_.flatten)
  }

  override def getBlockHeaderByHash(hash: ByteVector): F[Option[BlockHeader]] =
    history.getBlockHeaderByHash(hash)

  override def getBlockBodyByHash(hash: ByteVector): F[Option[BlockBody]] =
    history.getBlockBodyByHash(hash)

  override def getBlockBodies(hashes: List[ByteVector]): F[List[BlockBody]] =
    hashes
      .take(config.maxBlockBodiesPerRequest)
      .traverse(hash => history.getBlockBodyByHash(hash))
      .map(_.flatten)

  override def getBlockByNumber(number: N): F[Option[Block]] =
    history.getBlockByNumber(number)

  override def getBlockByHash(hash: ByteVector): F[Option[Block]] =
    history.getBlockByHash(hash)

  override def getTransactionCountByHash(hash: ByteVector): F[Option[Int]] =
    history.getBlockBodyByHash(hash).map(_.map(_.transactionList.length))

  override def getTotalDifficultyByNumber(number: N): F[Option[N]] =
    history.getTotalDifficultyByNumber(number)

  override def getTotalDifficultyByHash(hash: ByteVector): F[Option[N]] =
    history.getTotalDifficultyByHash(hash)
} 
Example 51
Source File: TestConsole.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.cli.dummies

import cats.implicits._
import cats.effect.Concurrent
import ch.epfl.bluebrain.nexus.cli.Console
import fs2.concurrent.Queue


final class TestConsole[F[_]](
    val stdQueue: Queue[F, String],
    val errQueue: Queue[F, String]
) extends Console[F] {

  override def println(line: String): F[Unit]    =
    stdQueue.enqueue1(line)
  override def printlnErr(line: String): F[Unit] =
    errQueue.enqueue1(line)
}

object TestConsole {

  final def apply[F[_]: Concurrent]: F[TestConsole[F]] =
    for {
      std <- Queue.circularBuffer[F, String](100)
      err <- Queue.circularBuffer[F, String](100)
    } yield new TestConsole[F](std, err)

} 
Example 52
Source File: ResultSet.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.effect.{Concurrent, Sync}
import cats.effect.implicits._
import cats.implicits._
import com.datastax.driver.core.{Row, ResultSet => ResultSetJ}
import com.evolutiongaming.scassandra.util.FromGFuture
import com.evolutiongaming.sstream.Stream
import com.evolutiongaming.sstream.FoldWhile._


object ResultSet {

  def apply[F[_] : Concurrent : FromGFuture](resultSet: ResultSetJ): Stream[F, Row] = {

    val iterator = resultSet.iterator()

    val fetch = FromGFuture[F].apply { resultSet.fetchMoreResults() }.void

    val fetched = Sync[F].delay { resultSet.isFullyFetched }

    val next = Sync[F].delay { List.fill(resultSet.getAvailableWithoutFetching)(iterator.next()) }

    apply[F, Row](fetch, fetched, next)
  }

  def apply[F[_] : Concurrent, A](
    fetch: F[Unit],
    fetched: F[Boolean],
    next: F[List[A]]
  ): Stream[F, A] = new Stream[F, A] {

    def foldWhileM[L, R](l: L)(f: (L, A) => F[Either[L, R]]) = {

      l.tailRecM[F, Either[L, R]] { l =>

        def apply(rows: List[A]) = {
          for {
            result <- rows.foldWhileM(l)(f)
          } yield {
            result.asRight[L]
          }
        }

        def fetchAndApply(rows: List[A]) = {
          for {
            fetching <- fetch.start
            result   <- rows.foldWhileM(l)(f)
            result   <- result match {
              case l: Left[L, R]  => fetching.join as l.rightCast[Either[L, R]]
              case r: Right[L, R] => r.leftCast[L].asRight[L].pure[F]
            }
          } yield result
        }

        for {
          fetched <- fetched
          rows    <- next
          result  <- if (fetched) apply(rows) else fetchAndApply(rows)
        } yield result
      }
    }
  }
} 
Example 53
Source File: CreateSchema.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.Monad
import cats.data.{NonEmptyList => Nel}
import cats.effect.Concurrent
import cats.implicits._
import com.evolutiongaming.catshelper.LogOf
import com.evolutiongaming.scassandra.TableName

object CreateSchema {

  type Fresh = Boolean


  def apply[F[_] : Concurrent : CassandraCluster : CassandraSession : CassandraSync : LogOf](
    config: SchemaConfig
  ): F[(Schema, Fresh)] = {

    for {
      createTables   <- CreateTables.of[F]
      createKeyspace  = CreateKeyspace[F]
      result         <- apply[F](config, createKeyspace, createTables)
    } yield result
  }

  def apply[F[_] : Monad](
    config: SchemaConfig,
    createKeyspace: CreateKeyspace[F],
    createTables: CreateTables[F]
  ): F[(Schema, Fresh)] = {

    def createTables1 = {
      val keyspace = config.keyspace.name

      def tableName(table: CreateTables.Table) = TableName(keyspace = keyspace, table = table.name)

      def table(name: String, query: TableName => Nel[String]) = {
        val tableName = TableName(keyspace = keyspace, table = name)
        CreateTables.Table(name = name, queries = query(tableName))
      }

      val journal = table(config.journalTable, a => Nel.of(JournalStatements.createTable(a)))

      val metadata = table(config.metadataTable, a => Nel.of(MetadataStatements.createTable(a)))

      val metaJournal = table(config.metaJournalTable, a => MetaJournalStatements.createTable(a))

      val pointer = table(config.pointerTable, a => Nel.of(PointerStatements.createTable(a)))

      val setting = table(config.settingTable, a => Nel.of(SettingStatements.createTable(a)))

      val schema = Schema(
        journal = tableName(journal),
        metadata = tableName(metadata),
        metaJournal = tableName(metaJournal),
        pointer = tableName(pointer),
        setting = tableName(setting))

      if (config.autoCreate) {
        for {
          result <- createTables(keyspace, Nel.of(journal, metadata, pointer, setting, metaJournal))
        } yield {
          (schema, result)
        }
      } else {
        (schema, false).pure[F]
      }
    }

    for {
      _      <- createKeyspace(config.keyspace)
      result <- createTables1
    } yield result
  }
} 
Example 54
Source File: SetupSchema.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.Parallel
import cats.effect.{Concurrent, Timer}
import cats.implicits._
import com.evolutiongaming.kafka.journal.{Origin, Setting, Settings}
import com.evolutiongaming.scassandra.TableName
import com.evolutiongaming.catshelper.CatsHelper._
import com.evolutiongaming.catshelper.{BracketThrowable, FromFuture, LogOf, ToFuture}
import com.evolutiongaming.kafka.journal.eventual.cassandra.CassandraHelper._

import scala.util.Try

object SetupSchema { self =>

  def migrate[F[_] : BracketThrowable : CassandraSession : CassandraSync : Settings](
    schema: Schema,
    fresh: CreateSchema.Fresh
  ): F[Unit] = {

    def addHeaders(table: TableName)(implicit cassandraSync: CassandraSync[F]) = {
      val query = JournalStatements.addHeaders(table)
      val fa = query.execute.first.redeem[Unit](_ => (), _ => ())
      cassandraSync { fa }
    }

    val schemaVersion = "schema-version"

    def version(setting: Option[Setting]) = {
      for {
        setting <- setting
        version <- Try(setting.value.toInt).toOption
      } yield version
    }

    def migrate = {
      for {
        _ <- if (fresh) ().pure[F] else addHeaders(schema.journal)
        _ <- Settings[F].setIfEmpty(schemaVersion, "0")
      } yield {}
    }

    for {
      setting <- Settings[F].get(schemaVersion)
      _       <- version(setting).fold(migrate)(_ => ().pure[F])
    } yield {}
  }

  def apply[F[_] : Concurrent : Parallel : Timer : CassandraCluster : CassandraSession : FromFuture : ToFuture : LogOf](
    config: SchemaConfig,
    origin: Option[Origin]
  ): F[Schema] = {

    def migrate(
      schema: Schema,
      fresh: CreateSchema.Fresh)(implicit
      cassandraSync: CassandraSync[F],
      settings: Settings[F]
    ) = {

      self.migrate[F](schema, fresh)
    }

    def createSchema(implicit cassandraSync: CassandraSync[F]) = CreateSchema(config)
    
    for {
      cassandraSync   <- CassandraSync.of[F](config, origin)
      ab              <- createSchema(cassandraSync)
      (schema, fresh)  = ab
      settings        <- SettingsCassandra.of[F](schema, origin)
      _               <- migrate(schema, fresh)(cassandraSync, settings)
    } yield schema
  }
} 
Example 55
Source File: CassandraCluster.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.effect.{Concurrent, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.scassandra.{CassandraClusterOf, CassandraConfig}
import com.evolutiongaming.scassandra
import com.evolutiongaming.scassandra.util.FromGFuture

trait CassandraCluster[F[_]] {

  def session: Resource[F, CassandraSession[F]]

  def metadata: F[CassandraMetadata[F]]
}

object CassandraCluster {

  def apply[F[_]](implicit F: CassandraCluster[F]): CassandraCluster[F] = F

  def apply[F[_] : Concurrent : FromGFuture](
    cluster: scassandra.CassandraCluster[F],
    retries: Int
  ): CassandraCluster[F] = new CassandraCluster[F] {

    def session = {
      for {
        session <- cluster.connect
        session <- CassandraSession.of[F](session)
      } yield {
        CassandraSession(session, retries)
      }
    }

    def metadata = {
      for {
        metadata <- cluster.metadata
      } yield {
        CassandraMetadata[F](metadata)
      }
    }
  }

  def of[F[_] : Concurrent : FromFuture : FromGFuture](
    config: CassandraConfig,
    cassandraClusterOf: CassandraClusterOf[F],
    retries: Int,
  ): Resource[F, CassandraCluster[F]] = {

    for {
      cluster <- cassandraClusterOf(config)
    } yield {
      apply[F](cluster, retries)
    }
  }
} 
Example 56
Source File: ResultSetSpec.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, IO}
import cats.implicits._
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.util.control.NoStackTrace


class ResultSetSpec extends AsyncFunSuite with Matchers {

  for {
    size      <- 0 to 5
    take      <- 1 to 5
    fetchSize <- 1 to 5
  } {
    test(s"size: $size, take: $take, fetchSize: $fetchSize") {
      testF[IO](size = size, take = take, fetchSize = fetchSize).run()
    }
  }

  private def testF[F[_] : Concurrent](size: Int, take: Int, fetchSize: Int) = {

    type Row = Int

    val all = (0 until size).toList

    for {
      fetches <- Ref[F].of(0)
      left    <- Ref[F].of(all)
      fetched <- Ref[F].of(List.empty[Row])
      next     = fetched.modify { rows => (List.empty, rows) }
      fetch    = for {
        _        <- fetches.update(_ + 1)
        toFetch1 <- left.get
        result   <- {
          if (toFetch1.isEmpty) ().pure[F]
          else for {
            taken <- left.modify { rows =>
              val fetched = rows.take(fetchSize)
              val left = rows.drop(fetchSize)
              (left, fetched)
            }
            _    <- fetched.set(taken)
          } yield {}
        }
      } yield result
      resultSet   = ResultSet[F, Row](fetch, left.get.map(_.isEmpty), next)
      rows       <- resultSet.take(take.toLong).toList
      fetches    <- fetches.get
    } yield {
      rows shouldEqual all.take(take)

      if (take >= size) {
        val expected = {
          val n = size / fetchSize
          if (size % fetchSize == 0) n else n + 1
        }
        fetches shouldEqual expected
      }
    }
  }

  case object NotImplemented extends RuntimeException with NoStackTrace
} 
Example 57
Source File: CacheOf.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.replicator

import cats.Parallel
import cats.effect.{Concurrent, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.{BracketThrowable, Runtime}
import com.evolutiongaming.scache
import com.evolutiongaming.scache.{CacheMetrics, Releasable}
import com.evolutiongaming.skafka.Topic
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.duration.FiniteDuration

trait CacheOf[F[_]] {

  def apply[K, V](topic: Topic): Resource[F, Cache[F, K, V]]
}

object CacheOf {

  def empty[F[_] : BracketThrowable]: CacheOf[F] = new CacheOf[F] {

    def apply[K, V](topic: Topic) = {

      val cache = new Cache[F, K, V] {

        def getOrUpdate(key: K)(value: => Resource[F, V]) = value.use(_.pure[F])

        def remove(key: K) = ().pure[F]
      }

      Resource.liftF(cache.pure[F])
    }
  }


  def apply[F[_] : Concurrent : Timer : Runtime : Parallel : MeasureDuration](
    expireAfter: FiniteDuration,
    cacheMetrics: Option[CacheMetrics.Name => CacheMetrics[F]]
  ): CacheOf[F] = {
    new CacheOf[F] {
      def apply[K, V](topic: Topic) = {
        for {
          cache <- scache.Cache.expiring[F, K, V](expireAfter)
          cache <- cacheMetrics.fold { Resource.liftF(cache.pure[F]) } { cacheMetrics => cache.withMetrics(cacheMetrics(topic)) }
        } yield {
          new Cache[F, K, V] {

            def getOrUpdate(key: K)(value: => Resource[F, V]) = {
              cache.getOrUpdateReleasable(key) { Releasable.of(value) }
            }

            def remove(key: K) = cache.remove(key).flatten.void
          }
        }
      }
    }
  }
} 
Example 58
Source File: KafkaSingletonTest.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.replicator

import cats.data.{NonEmptySet => Nes}
import cats.effect.concurrent.{Deferred, Ref}
import cats.effect.{Concurrent, IO, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.Log
import com.evolutiongaming.kafka.journal.IOSuite._
import com.evolutiongaming.skafka.consumer.RebalanceListener
import com.evolutiongaming.skafka.{Partition, TopicPartition}
import com.evolutiongaming.sstream.Stream
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class KafkaSingletonTest extends AsyncFunSuite with Matchers {

  test("allocate & release when partition assigned or revoked") {
    `allocate & release when partition assigned or revoked`[IO]().run()
  }

  private def `allocate & release when partition assigned or revoked`[F[_] : Concurrent : Timer](): F[Unit] = {

    val topic = "topic"

    def consumer(deferred: Deferred[F, RebalanceListener[F]]) = {
      new TopicConsumer[F] {

        def subscribe(listener: RebalanceListener[F]) = deferred.complete(listener)

        def poll = Stream.empty

        def commit = TopicCommit.empty
      }
    }

    def topicPartition(partition: Partition) = TopicPartition(topic, partition)

    val result = for {
      listener  <- Resource.liftF(Deferred[F, RebalanceListener[F]])
      allocated <- Resource.liftF(Ref[F].of(false))
      resource   = Resource.make { allocated.set(true) } { _ => allocated.set(false) }
      singleton <- KafkaSingleton.of(topic, consumer(listener).pure[Resource[F, *]], resource, Log.empty[F])
      listener  <- Resource.liftF(listener.get)
      _         <- Resource.liftF {
        for {
          a <- singleton.get
          _  = a shouldEqual none[Unit]
          a <- allocated.get
          _  = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _  = a shouldEqual none[Unit]
          a <- allocated.get
          _  = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _  = a shouldEqual ().some
          a <- allocated.get
          _  = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _  = a shouldEqual ().some
          a <- allocated.get
          _  = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _  = a shouldEqual none[Unit]
          a <- allocated.get
          _  = a shouldEqual false
        } yield {}
      }
    } yield {}
    result.use { _ => ().pure[F] }
  }
} 
Example 59
Source File: HeadCacheFenced.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal

import cats.Apply
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.CatsHelper._
import com.evolutiongaming.skafka.{Offset, Partition}


object HeadCacheFenced {

  def of[F[_] : Concurrent](headCache: Resource[F, HeadCache[F]]): Resource[F, HeadCache[F]] = {

    val fence = Resource.make {
      Ref[F].of(().pure[F])
    } { fence =>
      fence.set(HeadCacheReleasedError.raiseError[F, Unit])
    }

    val result = for {
      headCache <- headCache
      fence     <- fence
    } yield {
      apply(headCache, fence.get.flatten)
    }
    result.fenced
  }

  def apply[F[_] : Apply](headCache: HeadCache[F], fence: F[Unit]): HeadCache[F] = {
    (key: Key, partition: Partition, offset: Offset) => {
      fence *> headCache.get(key, partition, offset)
    }
  }
} 
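The fence is a `Ref[F, F[Unit]]` that starts as a no-op and is swapped for a failing action on release, so late callers fail fast instead of touching a released `HeadCache`. The pattern in isolation, as a sketch with an illustrative error type:

import cats.effect.concurrent.Ref
import cats.effect.{ContextShift, IO, Resource}
import cats.implicits._

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

def fenced[A](use: IO[A]): Resource[IO, IO[A]] =
  Resource
    .make(Ref[IO].of(IO.unit)) { fence =>
      // after release, the fence fails every subsequent call
      fence.set(IO.raiseError(new IllegalStateException("resource released")))
    }
    .map(fence => fence.get.flatten *> use)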
Example 60
Source File: HeadCacheOf.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal

import cats.{Applicative, Parallel}
import cats.implicits._
import cats.effect.{Concurrent, Resource, Timer}
import com.evolutiongaming.catshelper.{FromTry, LogOf}
import com.evolutiongaming.kafka.journal.eventual.EventualJournal
import com.evolutiongaming.skafka.consumer.ConsumerConfig
import com.evolutiongaming.smetrics.MeasureDuration

trait HeadCacheOf[F[_]] {

  def apply(
    consumerConfig: ConsumerConfig,
    eventualJournal: EventualJournal[F]
  ): Resource[F, HeadCache[F]]
}

object HeadCacheOf {

  def empty[F[_] : Applicative]: HeadCacheOf[F] = const(Resource.liftF(HeadCache.empty[F].pure[F]))


  def const[F[_]](value: Resource[F, HeadCache[F]]): HeadCacheOf[F] = {
    (_: ConsumerConfig, _: EventualJournal[F]) => value
  }
  

  def apply[F[_]](implicit F: HeadCacheOf[F]): HeadCacheOf[F] = F


  def apply[
    F[_] : Concurrent : Parallel : Timer : LogOf : KafkaConsumerOf : MeasureDuration : FromTry : FromAttempt :
    FromJsResult : JsonCodec.Decode
  ](
    metrics: Option[HeadCacheMetrics[F]]
  ): HeadCacheOf[F] = {
    (consumerConfig: ConsumerConfig, eventualJournal: EventualJournal[F]) => {
      for {
        headCache <- HeadCache.of[F](consumerConfig, eventualJournal, metrics)
        log       <- Resource.liftF(LogOf[F].apply(HeadCache.getClass))
      } yield {
        headCache.withLog(log)
      }
    }
  }
} 
Example 61
Source File: GracefulFiber.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal.util

import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, Fiber}
import cats.implicits._


trait GracefulFiber[F[_]] {
  def apply[A](f: F[Boolean] => F[Fiber[F, A]]): F[Fiber[F, A]]
}

object GracefulFiber {

  def apply[F[_] : Concurrent]: GracefulFiber[F] = {

    new GracefulFiber[F] {

      def apply[A](f: F[Boolean] => F[Fiber[F, A]]) = {
        for {
          cancelRef <- Ref.of[F, Boolean](false)
          fiber     <- f(cancelRef.get)
        } yield {
          new Fiber[F, A] {

            def join = fiber.join

            def cancel = {
              for {
                cancel <- cancelRef.getAndSet(true)
                _      <- if (cancel) ().pure[F] else fiber.join
              } yield {}
            }
          }
        }
      }
    }
  }
} 
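Usage note: the worker is expected to poll the cancel flag and stop at a safe point; `cancel` then waits for the loop to finish instead of interrupting it mid-iteration. A sketch with an illustrative polling loop:

import cats.effect.{ContextShift, IO, Timer}
import cats.implicits._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val t: Timer[IO]         = IO.timer(ExecutionContext.global)

// The loop checks the flag between iterations and stops cleanly.
def loop(cancelled: IO[Boolean]): IO[Unit] =
  cancelled.flatMap {
    case true  => IO.unit
    case false => IO.sleep(10.millis) >> loop(cancelled)
  }

val graceful: IO[Unit] = for {
  fiber <- GracefulFiber[IO].apply(cancelled => loop(cancelled).start)
  _     <- fiber.cancel // returns only after `loop` has observed the flag
} yield ()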
Example 62
Source File: IOSuite.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 5.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallel: Parallel[IO]               = IO.ioParallel
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock(Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
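The syntax this helper enables in the suites above (see `ResultSetSpec` and `KafkaSingletonTest`), sketched with a hypothetical spec:

import cats.effect.IO
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

class ExampleSpec extends AsyncFunSuite with Matchers {
  test("completes within the default 5s timeout") {
    IO(21 * 2).map(_ shouldEqual 42).run()
  }
}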
Example 63
Source File: commands.scala    From redis4cats   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.redis4cats

import algebra._
import cats.effect.{ Concurrent, ContextShift }

trait RedisCommands[F[_], K, V]
    extends StringCommands[F, K, V]
    with HashCommands[F, K, V]
    with SetCommands[F, K, V]
    with SortedSetCommands[F, K, V]
    with ListCommands[F, K, V]
    with GeoCommands[F, K, V]
    with ConnectionCommands[F]
    with ServerCommands[F, K]
    with TransactionalCommands[F, K]
    with PipelineCommands[F]
    with ScriptCommands[F, K, V]
    with KeyCommands[F, K]

object RedisCommands {
  implicit class LiftKOps[F[_], K, V](val cmd: RedisCommands[F, K, V]) extends AnyVal {
    def liftK[G[_]: Concurrent: ContextShift]: RedisCommands[G, K, V] =
      cmd.asInstanceOf[BaseRedis[F, K, V]].liftK[G]
  }
} 
Example 64
Source File: QueueLogger.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.joex.scheduler

import cats.effect.{Concurrent, Sync}
import cats.implicits._
import fs2.concurrent.Queue

import docspell.common._

object QueueLogger {

  def create[F[_]: Sync](
      jobId: Ident,
      jobInfo: String,
      q: Queue[F, LogEvent]
  ): Logger[F] =
    new Logger[F] {
      def trace(msg: => String): F[Unit] =
        LogEvent.create[F](jobId, jobInfo, LogLevel.Debug, msg).flatMap(q.enqueue1)

      def debug(msg: => String): F[Unit] =
        LogEvent.create[F](jobId, jobInfo, LogLevel.Debug, msg).flatMap(q.enqueue1)

      def info(msg: => String): F[Unit] =
        LogEvent.create[F](jobId, jobInfo, LogLevel.Info, msg).flatMap(q.enqueue1)

      def warn(msg: => String): F[Unit] =
        LogEvent.create[F](jobId, jobInfo, LogLevel.Warn, msg).flatMap(q.enqueue1)

      def error(ex: Throwable)(msg: => String): F[Unit] =
        LogEvent
          .create[F](jobId, jobInfo, LogLevel.Error, msg)
          .map(le => le.copy(ex = Some(ex)))
          .flatMap(q.enqueue1)

      def error(msg: => String): F[Unit] =
        LogEvent.create[F](jobId, jobInfo, LogLevel.Error, msg).flatMap(q.enqueue1)
    }

  def apply[F[_]: Concurrent](
      jobId: Ident,
      jobInfo: String,
      bufferSize: Int,
      sink: LogSink[F]
  ): F[Logger[F]] =
    for {
      q <- Queue.circularBuffer[F, LogEvent](bufferSize)
      log = create(jobId, jobInfo, q)
      _ <- Concurrent[F].start(q.dequeue.through(sink.receive).compile.drain)
    } yield log

} 
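How the pieces fit together, sketched with the `LogSink.printer` shown in the next example; the job-info string and buffer size are illustrative:

import cats.effect.{ContextShift, IO}
import docspell.common._
import docspell.joex.scheduler.{LogSink, QueueLogger}

import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val logToStdout: IO[Unit] = for {
  jobId  <- Ident.randomId[IO]
  logger <- QueueLogger[IO](jobId, "nightly-import", bufferSize = 64, LogSink.printer[IO])
  _      <- logger.info("hello from the queue-backed logger")
} yield ()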
Example 65
Source File: LogSink.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.joex.scheduler

import cats.effect.{Concurrent, Sync}
import cats.implicits._
import fs2.{Pipe, Stream}

import docspell.common._
import docspell.common.syntax.all._
import docspell.store.Store
import docspell.store.records.RJobLog

import org.log4s.{LogLevel => _, _}

trait LogSink[F[_]] {

  def receive: Pipe[F, LogEvent, Unit]

}

object LogSink {
  private[this] val logger = getLogger

  def apply[F[_]](sink: Pipe[F, LogEvent, Unit]): LogSink[F] =
    new LogSink[F] {
      val receive = sink
    }

  def logInternal[F[_]: Sync](e: LogEvent): F[Unit] =
    e.level match {
      case LogLevel.Info =>
        logger.finfo(e.logLine)
      case LogLevel.Debug =>
        logger.fdebug(e.logLine)
      case LogLevel.Warn =>
        logger.fwarn(e.logLine)
      case LogLevel.Error =>
        e.ex match {
          case Some(exc) =>
            logger.ferror(exc)(e.logLine)
          case None =>
            logger.ferror(e.logLine)
        }
    }

  def printer[F[_]: Sync]: LogSink[F] =
    LogSink(_.evalMap(e => logInternal(e)))

  def db[F[_]: Sync](store: Store[F]): LogSink[F] =
    LogSink(
      _.evalMap(ev =>
        for {
          id <- Ident.randomId[F]
          joblog = RJobLog(
            id,
            ev.jobId,
            ev.level,
            ev.time,
            ev.msg + ev.ex.map(th => ": " + th.getMessage).getOrElse("")
          )
          _ <- logInternal(ev)
          _ <- store.transact(RJobLog.insert(joblog))
        } yield ()
      )
    )

  def dbAndLog[F[_]: Concurrent](store: Store[F]): LogSink[F] = {
    val s: Stream[F, Pipe[F, LogEvent, Unit]] =
      Stream.emits(Seq(printer[F].receive, db[F](store).receive))
    LogSink(Pipe.join(s))
  }
} 
Example 66
Source File: RollingFileLogger.scala    From odin   with Apache License 2.0 4 votes vote down vote up
package io.odin.loggers

import java.nio.file.{Files, Path, Paths}
import java.time.{Instant, LocalDateTime}
import java.time.format.DateTimeFormatter
import java.util.TimeZone
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

import scala.concurrent.duration.{FiniteDuration, _}

object RollingFileLogger {

  def apply[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = {
    new RollingFileLoggerFactory(
      fileNamePattern,
      maxFileSizeInBytes,
      rolloverInterval,
      formatter,
      minLevel,
      FileLogger.apply[F]
    ).mk
  }

  private[odin] class RefLogger[F[_]: Timer: Monad](
      current: Ref[F, Logger[F]],
      override val minLevel: Level
  ) extends DefaultLogger[F](minLevel) {

    def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg))

    override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs))

  }

  private[odin] class RollingFileLoggerFactory[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level,
      underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]],
      fileSizeCheck: Path => Long = Files.size
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) {

    val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

    def mk: Resource[F, Logger[F]] = {
      val logger = for {
        ((logger, watcherFiber), release) <- allocate.allocated
        refLogger <- Ref.of(logger)
        refRelease <- Ref.of(release)
        _ <- F.start(rollingLoop(watcherFiber, refLogger, refRelease))
      } yield {
        (new RefLogger(refLogger, minLevel), refRelease)
      }
      Resource.make(logger)(_._2.get.flatten).map {
        case (logger, _) => logger
      }
    }

    def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS)

    // NOTE: the `allocate` resource (opening the next log file and starting the
    // rollover watcher fiber) is elided in this excerpt; `mk` and `rollingLoop` use it.

    def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] =
      for {
        _ <- watcher.join
        oldRelease <- release.get
        ((newLogger, newWatcher), newRelease) <- allocate.allocated
        _ <- logger.set(newLogger)
        _ <- release.set(newRelease)
        _ <- oldRelease
        _ <- rollingLoop(newWatcher, logger, release)
      } yield ()

  }

}
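A usage sketch against the `apply` signature above; the log path and rollover settings are illustrative:

import cats.effect.{ContextShift, IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val t: Timer[IO]         = IO.timer(ExecutionContext.global)

// Roll over at 10 MiB or hourly, whichever comes first.
val logger = RollingFileLogger[IO](
  fileNamePattern = dt => s"/tmp/app-$dt.log",
  maxFileSizeInBytes = Some(10L * 1024 * 1024),
  rolloverInterval = Some(1.hour),
  formatter = Formatter.default,
  minLevel = Level.Info
)

val program: IO[Unit] = logger.use(_.info("rolling file logging wired up"))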