org.apache.kafka.clients.consumer.OffsetAndMetadata Scala Examples

The following examples show how to use org.apache.kafka.clients.consumer.OffsetAndMetadata in Scala. Each example is taken from an open-source project; the source file and the project it comes from are noted above it.
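Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how OffsetAndMetadata is typically paired with TopicPartition to commit consumer offsets manually; the broker address, group id and topic name are placeholders.

import java.util.Properties

import org.apache.kafka.clients.consumer.{KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.collection.JavaConverters._

object ManualOffsetCommitExample extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092")  // placeholder broker address
  props.put("group.id", "offset-and-metadata-demo") // placeholder group id
  props.put("enable.auto.commit", "false")          // offsets are committed explicitly below

  val consumer = new KafkaConsumer[String, String](props, new StringDeserializer, new StringDeserializer)
  consumer.subscribe(List("demo-topic").asJava)     // placeholder topic

  try {
    val records = consumer.poll(java.time.Duration.ofMillis(500))

    // The offset to commit is the offset of the next record to consume, hence max + 1.
    // The metadata string is free-form and is stored alongside the committed offset.
    val toCommit: Map[TopicPartition, OffsetAndMetadata] =
      records.asScala
        .groupBy(r => new TopicPartition(r.topic, r.partition))
        .map { case (tp, rs) => tp -> new OffsetAndMetadata(rs.map(_.offset).max + 1, "committed-by-example") }

    if (toCommit.nonEmpty) consumer.commitSync(toCommit.asJava)
  } finally consumer.close()
}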
Example 1
Source File: PulsarSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.pulsar.sink

import java.util
import java.util.UUID

import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.pulsar.config.{PulsarConfigConstants, PulsarSinkConfig, PulsarSinkSettings}
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._



class PulsarSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter and
  // manifest members and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping Pulsar sink.")
    writer.foreach(w => w.close)
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    writer.foreach(w => w.flush)
  }

  override def version: String = manifest.version()
} 
Example 2
Source File: ElasticSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.elastic6

import java.util

import com.datamountaineer.streamreactor.connect.elastic6.config.{ElasticConfig, ElasticConfigConstants, ElasticSettings}
import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._

class ElasticSinkTask extends SinkTask with StrictLogging {
  private var writer: Option[ElasticJsonWriter] = None
  private val progressCounter = new ProgressCounter
  private var enableProgress: Boolean = false
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  
  override def stop(): Unit = {
    logger.info("Stopping Elastic sink.")
    writer.foreach(w => w.close())
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    logger.info("Flushing Elastic Sink")
  }

  override def version: String = manifest.version()
} 
Example 3
Source File: KuduSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.kudu.sink

import java.util

import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.kudu.config.{KuduConfig, KuduConfigConstants, KuduSettings}
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._


class KuduSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter and
  // manifest members and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping Kudu sink.")
    writer.foreach(w => w.close())
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    writer.foreach(w => w.flush())
  }

  override def version: String = manifest.version()
} 
Example 4
Source File: JMSSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.jms.sink

import java.util

import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.jms.config.{JMSConfig, JMSConfigConstants, JMSSettings}
import com.datamountaineer.streamreactor.connect.jms.sink.writer.JMSWriter
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._


class JMSSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer and manifest members
  // and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping JMS sink.")
    writer.foreach(w => w.close())
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    //TODO
    //have the writer expose a is busy; can expose an await using a countdownlatch internally
  }

  override def version: String = manifest.version()
} 
Example 5
Source File: CassandraSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.cassandra.sink

import java.util

import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigSink, CassandraSettings}
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}



class CassandraSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter,
  // enableProgress and manifest members and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping Cassandra sink.")
    writer.foreach(w => w.close())
    if (enableProgress) {
      progressCounter.empty
    }
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}

  override def version: String = manifest.version()
} 
Example 6
Source File: TwitterSinkTask.scala    From kafka-connect-twitter   with Apache License 2.0
package com.eneco.trading.kafka.connect.twitter

import java.util
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import scala.collection.JavaConverters._
import scala.util.{Success, Failure}

class TwitterSinkTask extends SinkTask with Logging {
  var writer: Option[SimpleTwitterWriter] = None

  override def start(props: util.Map[String, String]): Unit = {
    val sinkConfig = new TwitterSinkConfig(props)
    writer = Some(new TwitterWriter(
      sinkConfig.getString(TwitterSinkConfig.CONSUMER_KEY_CONFIG),
      sinkConfig.getPassword(TwitterSinkConfig.CONSUMER_SECRET_CONFIG).value,
      sinkConfig.getString(TwitterSinkConfig.TOKEN_CONFIG),
      sinkConfig.getPassword(TwitterSinkConfig.SECRET_CONFIG).value))
  }

  override def put(records: util.Collection[SinkRecord]): Unit =
    records.asScala
      .map(_.value.toString)
      .map(text => (text, writer match {
        case Some(writer) => writer.updateStatus(text)
        case None => Failure(new IllegalStateException("twitter writer is not set"))
      }))
      .foreach {
        case (text, result) => result match {
          case Success(id) => log.info(s"successfully tweeted `${text}`; got assigned id ${id}")
          case Failure(err) => log.warn(s"tweeting `${text}` failed: ${err.getMessage}")
        }
      }

  override def stop(): Unit = {
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]) = {
  }
  override def version(): String = ""
} 
Example 7
Source File: TwitterSinkTask.scala    From kafka-tweet-producer   with Apache License 2.0
package com.eneco.trading.kafka.connect.twitter

import java.util
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import scala.collection.JavaConverters._
import scala.util.{Success, Failure}

class TwitterSinkTask extends SinkTask with Logging {
  var writer: Option[SimpleTwitterWriter] = None

  override def start(props: util.Map[String, String]): Unit = {
    val sinkConfig = new TwitterSinkConfig(props)
    writer = Some(new TwitterWriter(
      sinkConfig.getString(TwitterSinkConfig.CONSUMER_KEY_CONFIG),
      sinkConfig.getPassword(TwitterSinkConfig.CONSUMER_SECRET_CONFIG).value,
      sinkConfig.getString(TwitterSinkConfig.TOKEN_CONFIG),
      sinkConfig.getPassword(TwitterSinkConfig.SECRET_CONFIG).value))
  }

  override def put(records: util.Collection[SinkRecord]): Unit =
    records.asScala
      .map(_.value.toString)
      .map(text => (text, writer match {
        case Some(writer) => writer.updateStatus(text)
        case None => Failure(new IllegalStateException("twitter writer is not set"))
      }))
      .foreach {
        case (text, result) => result match {
          case Success(id) => log.info(s"successfully tweeted `${text}`; got assigned id ${id}")
          case Failure(err) => log.warn(s"tweeting `${text}` failed: ${err.getMessage}")
        }
      }

  override def stop(): Unit = {
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]) = {
  }
  override def version(): String = ""
} 
Example 8
Source File: CoapSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.coap.sink

import java.util

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSettings, CoapSinkConfig}
import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._
import scala.collection.mutable


class CoapSinkTask extends SinkTask with StrictLogging {
  private val writers = mutable.Map.empty[String, CoapWriter]
  private val progressCounter = new ProgressCounter
  private var enableProgress: Boolean = false
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/coap-sink-ascii.txt")).mkString + s" $version")
    logger.info(manifest.printManifest())

    val conf = if (context.configs().isEmpty) props else context.configs()

    val sinkConfig = CoapSinkConfig(conf)
    enableProgress = sinkConfig.getBoolean(CoapConstants.PROGRESS_COUNTER_ENABLED)
    val settings = CoapSettings(sinkConfig)

    //if error policy is retry set retry interval
    if (settings.head.errorPolicy.getOrElse(ErrorPolicyEnum.THROW).equals(ErrorPolicyEnum.RETRY)) {
      context.timeout(sinkConfig.getString(CoapConstants.ERROR_RETRY_INTERVAL).toLong)
    }
    settings.map(s => (s.kcql.getSource, CoapWriter(s))).map({ case (k, v) => writers.put(k, v) })
  }

  override def put(records: util.Collection[SinkRecord]): Unit = {
    records.asScala.map(r => writers(r.topic()).write(List(r)))
    val seq = records.asScala.toVector
    if (enableProgress) {
      progressCounter.update(seq)
    }
  }

  override def stop(): Unit = {
    writers.foreach({ case (t, w) =>
      logger.info(s"Shutting down writer for $t")
      w.stop()
    })
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}

  override def version: String = manifest.version()

} 
Example 9
Source File: HazelCastSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.hazelcast.sink

import java.util

import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.hazelcast.config.{HazelCastSinkConfig, HazelCastSinkConfigConstants, HazelCastSinkSettings}
import com.datamountaineer.streamreactor.connect.hazelcast.writers.HazelCastWriter
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._


class HazelCastSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter and
  // manifest members and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping Hazelcast sink.")
    writer.foreach(w => w.close())
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    writer.foreach(w => w.flush())
  }

  override def version: String = manifest.version()
} 
Example 10
Source File: MqttSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.mqtt.sink

import java.util

import com.datamountaineer.streamreactor.connect.converters.sink.Converter
import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.mqtt.config.{MqttConfigConstants, MqttSinkConfig, MqttSinkSettings}
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}



class MqttSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter and
  // manifest members and the start()/put() methods are not shown in this excerpt.

  override def stop(): Unit = {
    logger.info("Stopping Mqtt sink.")
    writer.foreach(w => w.close)
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    writer.foreach(w => w.flush)
  }

  override def version: String = manifest.version()
} 
Example 11
Source File: CommitMarkerOffsetsActor.scala    From kmq   with Apache License 2.0
package com.softwaremill.kmq.redelivery

import akka.actor.Actor
import com.softwaremill.kmq.KafkaClients
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.collection.JavaConverters._
import scala.concurrent.duration._

class CommitMarkerOffsetsActor(markerTopic: String, clients: KafkaClients) extends Actor with StrictLogging {

  private val consumer = clients.createConsumer(null, classOf[ByteArrayDeserializer], classOf[ByteArrayDeserializer])

  private var toCommit: Map[Partition, Offset] = Map()

  import context.dispatcher

  override def preStart(): Unit = {
    logger.info("Started commit marker offsets actor")
  }

  override def postStop(): Unit = {
    try consumer.close()
    catch {
      case e: Exception => logger.error("Cannot close commit offsets consumer", e)
    }

    logger.info("Stopped commit marker offsets actor")
  }

  override def receive: Receive = {
    case CommitOffset(p, o) =>
      // only updating if the current offset is smaller
      if (toCommit.get(p).fold(true)(_ < o))
        toCommit += p -> o

    case DoCommit =>
      try {
        commitOffsets()
        toCommit = Map()
      } finally context.system.scheduler.scheduleOnce(1.second, self, DoCommit)
  }

  private def commitOffsets(): Unit = if (toCommit.nonEmpty) {
    consumer.commitSync(toCommit.map { case (partition, offset) =>
      (new TopicPartition(markerTopic, partition), new OffsetAndMetadata(offset))
    }.asJava)

    logger.debug(s"Committed marker offsets: $toCommit")
  }
} 
Example 12
Source File: MongoSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.mongodb.sink

import java.util

import com.datamountaineer.streamreactor.connect.mongodb.config.{MongoConfig, MongoConfigConstants}
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}


class MongoSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter,
  // enableProgress and manifest members and the start() method are not shown in this excerpt.

  override def put(records: util.Collection[SinkRecord]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    val seq = records.asScala.toVector
    writer.foreach(w => w.write(seq))

    if (enableProgress) {
      progressCounter.update(seq)
    }
  }

  override def stop(): Unit = {
    logger.info("Stopping Mongo Database sink.")
    writer.foreach(w => w.close())
    progressCounter.empty
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}

  override def version: String = manifest.version()
} 
Example 13
Source File: DocumentDbSinkTask.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.azure.documentdb.sink

import java.util

import com.datamountaineer.streamreactor.connect.azure.documentdb.DocumentClientProvider
import com.datamountaineer.streamreactor.connect.azure.documentdb.config.{DocumentDbConfig, DocumentDbConfigConstants, DocumentDbSinkSettings}
import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.microsoft.azure.documentdb.DocumentClient
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}


class DocumentDbSinkTask extends SinkTask with StrictLogging {
  // Class declaration restored from the file name; the writer, progressCounter,
  // enableProgress and manifest members and the start() method are not shown in this excerpt.

  override def put(records: util.Collection[SinkRecord]): Unit = {
    require(writer.nonEmpty, "Writer is not set!")
    val seq = records.asScala.toVector
    writer.foreach(w => w.write(seq))

    if (enableProgress) {
      progressCounter.update(seq)
    }
  }

  override def stop(): Unit = {
    logger.info("Stopping Azure Document DB sink.")
    writer.foreach(w => w.close())
    progressCounter.empty()
  }

  override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}

  override def version: String = manifest.version()
} 
Example 14
Source File: GenericSinkTask.scala    From kafka-connect-sap   with Apache License 2.0
package com.sap.kafka.connect.sink

import java.util

import com.sap.kafka.connect.config.BaseConfig
import com.sap.kafka.utils.ConnectorException
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import org.slf4j.Logger


abstract class GenericSinkTask extends SinkTask with SinkWriter   {
    
    override def put(records: util.Collection[SinkRecord]): Unit = {
      log.info(s"PHASE - 1 - get records from kafka, Started for task with assigned " +
        s"partitions ${this.context.assignment().toString} ")
      log.info(s"Number of Records read for Sink: ${records.size}")
      retriesLeft = config.maxRetries
      if (records.isEmpty) {
        return
      }
      val recordsCount: Int = records.size
      log.trace("Received {} records for Sink", recordsCount)
      try {
        writer.write(records)
      } catch  {
        case exception : ConnectorException =>
          log.error("Write of {} records failed, remainingRetries={}", records.size(), retriesLeft)
          while (retriesLeft > 0) {
            try {
              retriesLeft = retriesLeft - 1
              writer.close()
              writer = initWriter(config)
              writer.write(records)
              retriesLeft = -1
            } catch {
              case exception: ConnectorException =>
                // ignore
            }
          }

          if (retriesLeft == 0)
            throw exception
      } finally {
        log.info(s"PHASE - 1 ended for task, with assigned partitions ${this.context.assignment().toString}")
      }
    }


    override def stop(): Unit = {
      log.info("Stopping task")
      writer.close()
    }

    override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]) : Unit = {

    }

    override def version(): String = getClass.getPackage.getImplementationVersion


} 
Example 15
Source File: ProducerOps.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.producer

import cats.{Applicative, Foldable, MonadError, Traverse}
import cats.implicits._
import fs2._
import org.apache.kafka.common._
import org.apache.kafka.common.errors._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

case class ProducerOps[F[_], K, V](producer: ProducerApi[F, K, V]) {

  def sendAndForgetBatch[G[_]: Foldable](
      records: G[ProducerRecord[K, V]]
  )(implicit F: Applicative[F]): F[Unit] =
    records.traverse_(producer.sendAndForget)

  def sendSyncBatch[G[_]: Traverse](
      records: G[ProducerRecord[K, V]]
  )(implicit F: Applicative[F]): F[G[RecordMetadata]] =
    records.traverse(producer.sendSync)

  def sendAsyncBatch[G[_]: Traverse](
      records: G[ProducerRecord[K, V]]
  )(implicit F: Applicative[F]): F[G[RecordMetadata]] =
    records.traverse(producer.sendAsync)

  def pipeSync: Pipe[F, ProducerRecord[K, V], RecordMetadata] =
    _.evalMap(producer.sendSync)

  def pipeAsync: Pipe[F, ProducerRecord[K, V], RecordMetadata] =
    _.evalMap(producer.sendAsync)

  def sink: Pipe[F, ProducerRecord[K, V], Unit] =
    _.evalMap(producer.sendAndForget)

  def sinkSync: Pipe[F, ProducerRecord[K, V], Unit] =
    pipeSync.apply(_).void

  def sinkAsync: Pipe[F, ProducerRecord[K, V], Unit] =
    pipeAsync.apply(_).void

  def transaction[G[_]: Foldable](
      records: G[ProducerRecord[K, V]],
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  )(implicit F: MonadError[F, Throwable]): F[Unit] =
    (for {
      _ <- producer.beginTransaction
      _ <- sendAndForgetBatch(records) //should be no need to wait for RecordMetadatas or errors, since commitTransaction flushes and throws
      _ <- producer.sendOffsetsToTransaction(offsets, consumerGroupId)
      _ <- producer.commitTransaction
    } yield ()).handleErrorWith {
      // Exception-handling described in https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#send-org.apache.kafka.clients.producer.ProducerRecord-org.apache.kafka.clients.producer.Callback-
      case e: ProducerFencedException => F.raiseError(e)
      case e: OutOfOrderSequenceException => F.raiseError(e)
      case e: UnsupportedVersionException => F.raiseError(e)
      case e: AuthorizationException => F.raiseError(e)
      case _ => producer.abortTransaction
    }
}

import org.apache.avro.generic.GenericRecord
import com.sksamuel.avro4s.ToRecord

case class GenericProducerOps[F[_]](producer: ProducerApi[F, GenericRecord, GenericRecord]) {

  def toAvro4s[K: ToRecord, V: ToRecord]: ProducerApi[F, K, V] =
    Avro4sProducerImpl[F, K, V](producer)

} 
Example 16
Source File: ShiftingProducerImpl.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.producer

import cats.effect.{Async, ContextShift}
import java.util.concurrent.{Future => JFuture}

import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

import scala.concurrent.ExecutionContext

case class ShiftingProducerImpl[F[_]: Async, K, V](
    p: ProducerApi[F, K, V],
    blockingContext: ExecutionContext
)(implicit CS: ContextShift[F])
    extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = CS.evalOn(blockingContext)(p.abortTransaction)
  def beginTransaction: F[Unit] = CS.evalOn(blockingContext)(p.beginTransaction)
  def close: F[Unit] = CS.evalOn(blockingContext)(p.close)
  def close(timeout: FiniteDuration): F[Unit] = CS.evalOn(blockingContext)(p.close(timeout))
  def commitTransaction: F[Unit] = CS.evalOn(blockingContext)(p.commitTransaction)
  def flush: F[Unit] = CS.evalOn(blockingContext)(p.flush)
  def initTransactions: F[Unit] = CS.evalOn(blockingContext)(p.initTransactions)
  def metrics: F[Map[MetricName, Metric]] = CS.evalOn(blockingContext)(p.metrics)
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    CS.evalOn(blockingContext)(p.partitionsFor(topic))
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] =
    CS.evalOn(blockingContext)(p.sendOffsetsToTransaction(offsets, consumerGroupId))

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.sendRaw(record)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.sendRaw(record, callback)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Either[Exception, RecordMetadata] => Unit
  ): Unit = p.sendRaw(record, callback)

  def sendAndForget(record: ProducerRecord[K, V]): F[Unit] =
    CS.evalOn(blockingContext)(p.sendAndForget(record))
  def sendSync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendSync(record))
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    CS.evalOn(blockingContext)(p.sendAsync(record))
} 
Example 17
Source File: ProducerImpl.scala    From kafka4s   with Apache License 2.0
package com.banno.kafka.producer

import scala.collection.compat._
import cats.implicits._
import cats.effect.Async
import java.util.concurrent.{Future => JFuture}
import scala.jdk.CollectionConverters._
import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._

case class ProducerImpl[F[_], K, V](p: Producer[K, V])(implicit F: Async[F])
    extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = F.delay(p.abortTransaction())
  def beginTransaction: F[Unit] = F.delay(p.beginTransaction())
  def close: F[Unit] = F.delay(p.close())
  def close(timeout: FiniteDuration): F[Unit] =
    F.delay(p.close(java.time.Duration.ofMillis(timeout.toMillis)))
  def commitTransaction: F[Unit] = F.delay(p.commitTransaction())
  def flush: F[Unit] = F.delay(p.flush())
  def initTransactions: F[Unit] = F.delay(p.initTransactions())
  def metrics: F[Map[MetricName, Metric]] = F.delay(p.metrics().asScala.toMap)
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] =
    F.delay(p.partitionsFor(topic).asScala.toSeq)
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] =
    F.delay(p.sendOffsetsToTransaction(offsets.asJava, consumerGroupId))

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.send(record)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.send(record, callback)

  
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    F.async(sendRaw(record, _))
}

object ProducerImpl {
  //returns the type expected when creating a Resource
  def create[F[_]: Async, K, V](
      p: Producer[K, V]
  ): ProducerApi[F, K, V] =
    ProducerImpl(p)
} 
Example 18
Source File: Avro4sProducerImpl.scala    From kafka4s   with Apache License 2.0 5 votes vote down vote up
package com.banno.kafka.producer

import java.util.concurrent.{Future => JFuture}
import scala.concurrent.duration._
import org.apache.kafka.common._
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer._
import org.apache.avro.generic.GenericRecord
import com.sksamuel.avro4s.ToRecord
import com.banno.kafka._

//this is like Bifunctor[ProducerApi] but is contravariant in both arguments, cats does not seem to have anything like ContravariantBifunctor...

case class Avro4sProducerImpl[F[_], K: ToRecord, V: ToRecord](
    p: ProducerApi[F, GenericRecord, GenericRecord]
) extends ProducerApi[F, K, V] {
  def abortTransaction: F[Unit] = p.abortTransaction
  def beginTransaction: F[Unit] = p.beginTransaction
  def close: F[Unit] = p.close
  def close(timeout: FiniteDuration): F[Unit] = p.close(timeout)
  def commitTransaction: F[Unit] = p.commitTransaction
  def flush: F[Unit] = p.flush
  def initTransactions: F[Unit] = p.initTransactions
  def metrics: F[Map[MetricName, Metric]] = p.metrics
  def partitionsFor(topic: String): F[Seq[PartitionInfo]] = p.partitionsFor(topic)
  def sendOffsetsToTransaction(
      offsets: Map[TopicPartition, OffsetAndMetadata],
      consumerGroupId: String
  ): F[Unit] = p.sendOffsetsToTransaction(offsets, consumerGroupId)

  private[producer] def sendRaw(record: ProducerRecord[K, V]): JFuture[RecordMetadata] =
    p.sendRaw(record.toGenericRecord)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Callback
  ): JFuture[RecordMetadata] = p.sendRaw(record.toGenericRecord, callback)
  private[producer] def sendRaw(
      record: ProducerRecord[K, V],
      callback: Either[Exception, RecordMetadata] => Unit
  ): Unit =
    p.sendRaw(record.toGenericRecord, callback)

  def sendAndForget(record: ProducerRecord[K, V]): F[Unit] = p.sendAndForget(record.toGenericRecord)
  def sendSync(record: ProducerRecord[K, V]): F[RecordMetadata] = p.sendSync(record.toGenericRecord)
  def sendAsync(record: ProducerRecord[K, V]): F[RecordMetadata] =
    p.sendAsync(record.toGenericRecord)
} 
Example 19
Source File: IotHubSinkTask.scala    From toketi-kafka-connect-iothub   with MIT License
package com.microsoft.azure.iot.kafka.connect.sink

import java.util

import com.microsoft.azure.iot.kafka.connect.source.JsonSerialization
import com.microsoft.azure.sdk.iot.service.{DeliveryAcknowledgement, Message}
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}

import scala.collection.JavaConverters._

class IotHubSinkTask extends SinkTask with LazyLogging with JsonSerialization {

  // Protected for testing purposes
  protected     var messageSender     : Option[MessageSender]                = None
  protected     var acknowledgement   : DeliveryAcknowledgement              = DeliveryAcknowledgement.None
  private[this] var isClosing         : Boolean                              = false

  override def stop(): Unit = {
    logger.info("Stopping IotHubSink Task")
    if (this.messageSender.isDefined && !this.isClosing) {
      this.messageSender.synchronized {
        if (!this.isClosing) {
          this.isClosing = true
          logger.info("Closing IotHub clients")
          this.messageSender.get.close()
        }
      }
    }
  }

  override def put(records: util.Collection[SinkRecord]): Unit = {
    if (this.messageSender.isDefined && !this.isClosing) {
      this.messageSender.synchronized {
        if (!this.isClosing) {
          logger.info(s"Received ${records.size()} messages to be sent to devices. ")
          for (record: SinkRecord ← records.asScala) {
            val c2DMessage = C2DMessageConverter.validateSchemaAndGetMessage(record)
            this.sendMessage(c2DMessage)
          }
          logger.info(s"Started tasks to send ${records.size()} messages to devices.")
        }
      }
    } else {
      logger.info(s"Unable to send messages to devices - MessageSender is undefined " +
        s"= ${messageSender.isEmpty.toString}, isClosing = ${this.isClosing.toString}")
    }
  }

  private def sendMessage(c2DMessage: C2DMessage): Unit = {
    logger.info(s"Sending c2d message ${c2DMessage.toString}")
    val message = new Message(c2DMessage.message)
    message.setMessageId(c2DMessage.messageId)
    message.setDeliveryAcknowledgement(acknowledgement)
    if (c2DMessage.expiryTime.isDefined) {
      message.setExpiryTimeUtc(c2DMessage.expiryTime.get)
    }
    this.messageSender.get.sendMessage(c2DMessage.deviceId, message)
  }

  override def flush(offsets: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {}

  override def start(props: util.Map[String, String]): Unit = {
    logger.info("Starting IotHub Sink")
    val connectionString = props.get(IotHubSinkConfig.IotHubConnectionString)
    this.messageSender = Some(this.getMessageSender(connectionString))
    this.acknowledgement =
      DeliveryAcknowledgement.valueOf(props.get(IotHubSinkConfig.IotHubMessageDeliveryAcknowledgement))
  }

  protected def getMessageSender(connectionString: String): MessageSender = {
    new IotHubMessageSender(connectionString)
  }

  override def version(): String = getClass.getPackage.getImplementationVersion
} 
Example 20
Source File: DiagnosticEvent.scala    From zio-kafka   with Apache License 2.0
package zio.kafka.consumer.diagnostics

import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition

sealed trait DiagnosticEvent
object DiagnosticEvent {
  case class Poll(tpRequested: Set[TopicPartition], tpWithData: Set[TopicPartition], tpWithoutData: Set[TopicPartition])
      extends DiagnosticEvent
  case class Request(partition: TopicPartition) extends DiagnosticEvent

  sealed trait Commit extends DiagnosticEvent
  object Commit {
    case class Started(offsets: Map[TopicPartition, Long])                                extends Commit
    case class Success(offsets: Map[TopicPartition, OffsetAndMetadata])                   extends Commit
    case class Failure(offsets: Map[TopicPartition, OffsetAndMetadata], cause: Throwable) extends Commit
  }

  sealed trait Rebalance extends DiagnosticEvent
  object Rebalance {
    case class Revoked(partitions: Set[TopicPartition])  extends Rebalance
    case class Assigned(partitions: Set[TopicPartition]) extends Rebalance
  }
}