kafka.server.KafkaServerStartable Scala Examples

The following examples show how to use kafka.server.KafkaServerStartable. Each listing below notes its source file, originating project, and license.
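All four examples share the same core pattern: assemble broker settings in a java.util.Properties, wrap them in a KafkaConfig, and hand that to KafkaServerStartable. A minimal sketch of that shared skeleton follows (the settings are illustrative, and a ZooKeeper instance must already be listening on the configured address):

import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}

object MinimalBroker extends App {
  val props = new Properties()
  props.put("zookeeper.connect", "localhost:2181") // assumes a running ZooKeeper
  props.put("broker.id", "0")
  props.put("log.dirs", "/tmp/minimal-kafka-logs")
  props.put("offsets.topic.replication.factor", "1") // single node, so replication factor 1

  val broker = new KafkaServerStartable(KafkaConfig.fromProps(props))
  broker.startup() // returns once startup completes; serving happens on background threads

  sys.addShutdownHook {
    broker.shutdown()
    broker.awaitShutdown() // block until all broker threads have stopped
  }
}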
Example 1
Source File: KafkaServer.scala (from akka_streams_tutorial, MIT License)
package alpakka.env

import java.io.File
import java.net.InetSocketAddress
import java.nio.file.{Files, Paths}
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.commons.io.FileUtils
import org.apache.zookeeper.server.quorum.QuorumPeerConfig
import org.apache.zookeeper.server.{ServerConfig, ZooKeeperServerMain}


object KafkaServer extends App {

  val zookeeperPort = 2181

  val kafkaLogs = "/tmp/kafka-logs"
  val kafkaLogsPath = Paths.get(kafkaLogs)

  // Workaround for a Kafka 2.5 restart issue where a stale meta.properties clashes with a newly generated cluster ID.
  // See: https://stackoverflow.com/questions/59592518/kafka-broker-doesnt-find-cluster-id-and-creates-new-one-after-docker-restart/60864763#comment108382967_60864763
  def fix25Behaviour(): Unit = {
    val fileWithConflictingContent = kafkaLogsPath.resolve("meta.properties").toFile
    if (fileWithConflictingContent.exists()) FileUtils.forceDelete(fileWithConflictingContent)
  }

  def removeKafkaLogs(): Unit = {
    if (kafkaLogsPath.toFile.exists()) FileUtils.forceDelete(kafkaLogsPath.toFile)
  }

  // Keeps the persistent topic data; only the conflicting meta.properties is removed
  fix25Behaviour()
  // Last resort: wipe the Kafka log directory completely
  //removeKafkaLogs()

  val quorumConfiguration = new QuorumPeerConfig {
    // Since we do not run a ZooKeeper cluster, the data can live in throwaway temp directories
    override def getDataDir: File = Files.createTempDirectory("zookeeper").toFile
    override def getDataLogDir: File = Files.createTempDirectory("zookeeper-logs").toFile
    override def getClientPortAddress: InetSocketAddress = new InetSocketAddress(zookeeperPort)
  }

  class StoppableZooKeeperServerMain extends ZooKeeperServerMain {
    def stop(): Unit = shutdown()
  }

  val zooKeeperServer = new StoppableZooKeeperServerMain()

  val zooKeeperConfig = new ServerConfig()
  zooKeeperConfig.readFrom(quorumConfiguration)

  val zooKeeperThread = new Thread {
    override def run(): Unit = zooKeeperServer.runFromConfig(zooKeeperConfig)
  }

  zooKeeperThread.start()

  val kafkaProperties = new Properties()
  kafkaProperties.put("zookeeper.connect", s"localhost:$zookeeperPort")
  kafkaProperties.put("broker.id", "0")
  kafkaProperties.put("offsets.topic.replication.factor", "1")
  kafkaProperties.put("log.dirs", kafkaLogs)
  kafkaProperties.put("delete.topic.enable", "true")
  kafkaProperties.put("group.initial.rebalance.delay.ms", "0")
  kafkaProperties.put("transaction.state.log.min.isr", "1")
  kafkaProperties.put("transaction.state.log.replication.factor", "1")
  kafkaProperties.put("zookeeper.connection.timeout.ms", "6000")
  kafkaProperties.put("num.partitions", "10")

  val kafkaConfig = KafkaConfig.fromProps(kafkaProperties)

  val kafka = new KafkaServerStartable(kafkaConfig)

  println("About to start...")
  kafka.startup()

  scala.sys.addShutdownHook {
    println("About to shutdown...")
    kafka.shutdown()
    kafka.awaitShutdown()
    zooKeeperServer.stop()
  }

  zooKeeperThread.join()
} 
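Since the example sets neither port nor listeners, the broker comes up on the default PLAINTEXT port 9092. A hypothetical smoke test with the standard Java producer client could look like this (the topic name is made up; topic auto-creation is on by default):

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

object SmokeTest extends App {
  val producerProps = new Properties()
  producerProps.put("bootstrap.servers", "localhost:9092")
  producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  val producer = new KafkaProducer[String, String](producerProps)
  // send() is asynchronous; get() forces completion so failures surface immediately
  producer.send(new ProducerRecord[String, String]("test-topic", "hello")).get()
  producer.close()
}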
Example 2
Source File: KafkaTestUtils.scala (from spark-kafka-writer, Apache License 2.0)
package com.github.benfradet.spark.kafka.writer

import java.io.File
import java.net.InetSocketAddress
import java.util.Arrays.asList
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

import scala.util.Random

class KafkaTestUtils {
  // zk
  private val zkHost = "localhost"
  private val zkPort = 2181
  private var zk: EmbeddedZookeeper = _
  private var zkReady = false

  // kafka
  private val brokerHost = "localhost"
  private val brokerPort = 9092
  private var kafkaServer: KafkaServerStartable = _
  private var topicCountMap = Map.empty[String, Int]
  private var brokerReady = false
  private var kafkaAdminClient: AdminClient = _

  
  @scala.annotation.varargs
  def createTopics(topics: String*): Unit =
    for (topic <- topics) {
      kafkaAdminClient.createTopics(asList(new NewTopic(topic, 1, 1: Short)))
      Thread.sleep(1000) // crude wait for the topic metadata to propagate
      topicCountMap = topicCountMap + (topic -> 1)
    }

  private def brokerProps: Properties = {
    val props = new Properties
    props.put("broker.id", "0")
    props.put("host.name", brokerHost)
    props.put("log.dir",
      {
        val dir = System.getProperty("java.io.tmpdir") +
          "/logDir-" + new Random().nextInt(Int.MaxValue)
        val f = new File(dir)
        f.mkdirs()
        dir
      }
    )
    props.put("port", brokerPort.toString)
    props.put("zookeeper.connect", zkAddress)
    props.put("zookeeper.connection.timeout.ms", "10000")
    props.put("offsets.topic.replication.factor", "1")
    props
  }

  private class EmbeddedZookeeper(hostname: String, port: Int) {
    private val snapshotDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "snapshotDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }
    private val logDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "logDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }

    private val factory = {
      val zkTickTime = 500
      val zk = new ZooKeeperServer(snapshotDir, logDir, zkTickTime)
      val f = new NIOServerCnxnFactory
      val maxCnxn = 16
      f.configure(new InetSocketAddress(hostname, port), maxCnxn)
      f.startup(zk)
      f
    }

    def shutdown(): Unit = {
      factory.shutdown()
      snapshotDir.delete()
      logDir.delete()
      ()
    }
  }
} 
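With the lifecycle helpers sketched into the class above, a test could drive the embedded stack roughly like this (the topic name is made up):

val utils = new KafkaTestUtils
utils.setup()                 // embedded ZooKeeper first, then the broker on localhost:9092
utils.createTopics("events")
// ... exercise producer/consumer code against localhost:9092 here ...
utils.tearDown()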
Example 3
Source File: KafkaTestBroker.scala (from CMAK, Apache License 2.0)
package kafka.test

import java.io.File
import java.util.Properties

import com.google.common.io.Files
import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.test.InstanceSpec

import scala.util.Try


class KafkaTestBroker(zookeeper: CuratorFramework, zookeeperConnectionString: String) {
  val AdminPath = "/admin"
  val BrokersPath = "/brokers"
  val ClusterPath = "/cluster"
  val ConfigPath = "/config"
  val ControllerPath = "/controller"
  val ControllerEpochPath = "/controller_epoch"
  val IsrChangeNotificationPath = "/isr_change_notification"
  val LogDirEventNotificationPath = "/log_dir_event_notification"
  val KafkaAclPath = "/kafka-acl"
  val KafkaAclChangesPath = "/kafka-acl-changes"

  val ConsumersPath = "/consumers"
  val ClusterIdPath = s"$ClusterPath/id"
  val BrokerIdsPath = s"$BrokersPath/ids"
  val BrokerTopicsPath = s"$BrokersPath/topics"
  val ReassignPartitionsPath = s"$AdminPath/reassign_partitions"
  val DeleteTopicsPath = s"$AdminPath/delete_topics"
  val PreferredReplicaLeaderElectionPath = s"$AdminPath/preferred_replica_election"
  val BrokerSequenceIdPath = s"$BrokersPath/seqid"
  val ConfigChangesPath = s"$ConfigPath/changes"
  val ConfigUsersPath = s"$ConfigPath/users"
  val ConfigBrokersPath = s"$ConfigPath/brokers"
  val ProducerIdBlockPath = "/latest_producer_id_block"

  private[this] val port: Int = InstanceSpec.getRandomPort
  private[this] val config: KafkaConfig = buildKafkaConfig(zookeeperConnectionString)
  private[this] val kafkaServerStartable: KafkaServerStartable = new KafkaServerStartable(config)
  kafkaServerStartable.startup()

  // Wait until the broker registers itself in ZooKeeper
  var count = 0
  while (count < 10 && zookeeper.checkExists().forPath(BrokerIdsPath + "/0") == null) {
    count += 1
    println("Waiting for broker ...")
    println(Option(zookeeper.getData.forPath(BrokerIdsPath + "/0")).map(kafka.manager.asString))
    Thread.sleep(1000)
  }

  private def buildKafkaConfig(zookeeperConnectionString: String): KafkaConfig = {
    val p: Properties = new Properties
    p.setProperty("zookeeper.connect", zookeeperConnectionString)
    p.setProperty("broker.id", "0")
    p.setProperty("port", "" + port)
    p.setProperty("log.dirs", getLogDir)
    p.setProperty("log.retention.hours", "1")
    p.setProperty("offsets.topic.replication.factor", "1")
    p.setProperty("delete.topic.enable", "true")
    new KafkaConfig(p)
  }

  private def getLogDir: String = {
    val logDir: File = Files.createTempDir()
    logDir.deleteOnExit()
    logDir.getAbsolutePath
  }

  def getBrokerConnectionString: String = s"localhost:$port"

  def getPort: Int = port

  def shutdown(): Unit = {
    Try(kafkaServerStartable.shutdown())
  }
} 
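KafkaTestBroker expects a running ZooKeeper plus a Curator client connected to it. The curator-test artifact that already supplies InstanceSpec also provides TestingServer, so a plausible wiring looks like this (the retry settings are illustrative):

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer

object BrokerHarness extends App {
  val zkServer = new TestingServer() // in-process ZooKeeper on a random port, started immediately
  val curator = CuratorFrameworkFactory.newClient(
    zkServer.getConnectString, new ExponentialBackoffRetry(1000, 3))
  curator.start()

  val broker = new KafkaTestBroker(curator, zkServer.getConnectString)
  println(s"Broker reachable at ${broker.getBrokerConnectionString}")

  broker.shutdown()
  curator.close()
  zkServer.close()
}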
Example 4
Source File: EmbeddedKafkaServer.scala (from affinity, Apache License 2.0)
package io.amient.affinity.kafka

import java.io.File
import java.util.Properties
import java.util.concurrent.TimeUnit

import kafka.server.{KafkaConfig, KafkaServerStartable}
import kafka.zk.BrokerIdZNode
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.serialize.ZkSerializer
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.collection.mutable

trait EmbeddedKafkaServer extends EmbeddedService with EmbeddedZookeperServer {

  private val log = LoggerFactory.getLogger(classOf[EmbeddedKafka])

  def numPartitions: Int

  private val embeddedKafkaPath = new File(testDir, "local-kafka-logs")
  private val kafkaConfig = new KafkaConfig(new Properties {
    put("broker.id", "1")
    put("host.name", "localhost")
    put("port", "0")
    put("log.dir", embeddedKafkaPath.toString)
    put("num.partitions", numPartitions.toString)
    put("auto.create.topics.enable", "false")
    put("delete.topic.enable", "true")
    put("zookeeper.connect", zkConnect)
    put("offsets.topic.replication.factor", "1")
  })

  private val kafka = new KafkaServerStartable(kafkaConfig)
  kafka.startup()

  lazy val admin = AdminClient.create(Map[String, AnyRef]("bootstrap.servers" -> kafkaBootstrap).asJava)

  def createTopic(name: String): Unit = {
    admin.createTopics(List(new NewTopic(name, numPartitions, 1)).asJava).all().get(30, TimeUnit.SECONDS)
  }

  def listTopics: mutable.Set[String] = {
    admin.listTopics().names().get(1, TimeUnit.SECONDS).asScala
  }

  val tmpZkClient = new ZkClient(zkConnect, 5000, 6000, new ZkSerializer {
    override def serialize(o: Object): Array[Byte] = o.toString.getBytes

    override def deserialize(bytes: Array[Byte]): Object = new String(bytes)
  })

  val broker = BrokerIdZNode.decode(1, tmpZkClient.readData[String]("/brokers/ids/1").getBytes("utf-8")).broker
  val kafkaBootstrap = broker.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).connectionString()
  tmpZkClient.close()
  log.info(s"Embedded Kafka $kafkaBootstrap, data dir: $testDir")

  abstract override def close(): Unit = try kafka.shutdown() finally super.close()

}
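Because EmbeddedKafkaServer is written as a stackable trait (note the abstract override on close()), it is mixed into a test harness rather than instantiated directly; numPartitions is the only member a concrete class must supply. A rough sketch, assuming the affinity base traits provide testDir and zkConnect and that a hypothetical SuiteBase implements close():

// Hypothetical harness; SuiteBase stands in for the affinity test base class.
class KafkaSuite extends SuiteBase with EmbeddedKafkaServer {
  override def numPartitions: Int = 3
  // createTopic("events") and listTopics can then run against kafkaBootstrap
}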