kafka.server.KafkaServer Java Examples

The following examples show how to use kafka.server.KafkaServer. Each example is taken from an open source project; the source file, project, and license are noted above it.
Example #1
Source File: ClusterTestHarness.java    From kareldb with Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
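A matching @After method is not included in this excerpt; a minimal teardown sketch, assuming the same servers and zookeeper fields and reusing the shutdown calls shown in Example #16:

@After
public void tearDown() throws Exception {
    // Hypothetical cleanup counterpart to setUp(): stop the brokers first, then ZooKeeper.
    for (KafkaServer server : servers) {
        server.shutdown();                            // ask the broker to stop
        server.awaitShutdown();                       // block until it has fully stopped
        CoreUtils.delete(server.config().logDirs());  // remove the temporary log directories
    }
    zookeeper.shutdown();                             // stop the embedded ZooKeeper last
}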
 
Example #2
Source File: EmbeddedKafka.java    From attic-apex-malhar with Apache License 2.0
public void publish(String topic, List<String> messages)
{
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

  try (KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps)) {
    for (String message : messages) {
      ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, message.getBytes(StandardCharsets.UTF_8));
      producer.send(data);
    }
  }

  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
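The read side is not shown in this excerpt; a minimal consume sketch, assuming the same BROKERHOST and BROKERPORT constants and a kafka-clients version (2.0+) that supports poll(Duration):

public List<String> consume(String topic, int expectedCount)
{
  Properties consumerProps = new Properties();
  consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
  consumerProps.setProperty("group.id", "embedded-kafka-test");
  consumerProps.setProperty("auto.offset.reset", "earliest");
  consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
  consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

  List<String> messages = new ArrayList<>();
  long deadline = System.currentTimeMillis() + 30000;
  try (KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
    consumer.subscribe(Collections.singletonList(topic));
    // Poll until the expected number of messages arrives or the deadline passes.
    while (messages.size() < expectedCount && System.currentTimeMillis() < deadline) {
      ConsumerRecords<Integer, byte[]> records = consumer.poll(Duration.ofMillis(500));
      for (ConsumerRecord<Integer, byte[]> record : records) {
        messages.add(new String(record.value(), StandardCharsets.UTF_8));
      }
    }
  }
  return messages;
}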
 
Example #3
Source File: KafkaNotification.java    From incubator-atlas with Apache License 2.0
private void startKafka() throws IOException, URISyntaxException {
    String kafkaValue = properties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    LOG.debug("Starting kafka at {}", kafkaValue);
    URL kafkaAddress = getURL(kafkaValue);

    Properties brokerConfig = properties;
    brokerConfig.setProperty("broker.id", "1");
    brokerConfig.setProperty("host.name", kafkaAddress.getHost());
    brokerConfig.setProperty("port", String.valueOf(kafkaAddress.getPort()));
    brokerConfig.setProperty("log.dirs", constructDir("kafka").getAbsolutePath());
    brokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));

    kafkaServer = new KafkaServer(KafkaConfig.fromProps(brokerConfig), new SystemTime(),
            Option.apply(this.getClass().getName()));
    kafkaServer.startup();
    LOG.debug("Embedded kafka server started with broker config {}", brokerConfig);
}
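A shutdown counterpart is not shown here; a minimal sketch of what one can look like, not taken from the source file:

private void stopKafka() {
    // Hypothetical counterpart to startKafka(): stop the embedded broker and wait for it to exit.
    if (kafkaServer != null) {
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
        LOG.debug("Embedded kafka server stopped");
    }
}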
 
Example #4
Source File: EmbeddedKafkaBroker.java    From ameliant-tools with Apache License 2.0
@Override
protected void before() throws Throwable {
    logDirectory = tempDir(perTest("kafka-log"));
    Properties properties = brokerDefinition.getProperties();
    properties.setProperty(KafkaConfig.LogDirProp(), logDirectory.getCanonicalPath());
    kafkaServer = new KafkaServer(new KafkaConfig(properties),
            SystemTime$.MODULE$, Some$.MODULE$.apply("kafkaServer"));
    kafkaServer.startup();

    List<TopicDefinition> topicDefinitions = brokerDefinition.getTopicDefinitions();
    if (!topicDefinitions.isEmpty()) {
        ZkUtils zkUtils = ZkUtils.apply(brokerDefinition.getZookeeperConnect(), 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        for (TopicDefinition topicDefinition : topicDefinitions) {
            String name = topicDefinition.getName();
            log.info("Creating topic {}", name);
            AdminUtils.createTopic(zkUtils,
                    name,
                    topicDefinition.getPartitions(),
                    topicDefinition.getReplicationFactor(),
                    topicDefinition.getProperties());
        }
    }
}
 
Example #5
Source File: ClusterTestHarness.java    From kcache with Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
 
Example #6
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static void waitUntilMetadataIsPropagated(final List<KafkaServer> servers,
                                                 final String topic,
                                                 final int partition,
                                                 final long timeout) throws InterruptedException {
  TestUtils.waitForCondition(new TestCondition() {
    @Override
    public boolean conditionMet() {
      for (final KafkaServer server : servers) {
        final MetadataCache metadataCache = server.apis().metadataCache();
        final Option<UpdateMetadataRequest.PartitionState> partitionInfo =
                metadataCache.getPartitionInfo(topic, partition);
        if (partitionInfo.isEmpty()) {
          return false;
        }
        final UpdateMetadataRequest.PartitionState metadataPartitionState = partitionInfo.get();
        if (!Request.isValidBrokerId(metadataPartitionState.basePartitionState.leader)) {
          return false;
        }
      }
      return true;
    }
  }, timeout, "metadata for topic=" + topic + " partition=" + partition + " not propagated to all brokers");

}
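Call sites typically invoke this right after creating a topic and before producing to it; an illustrative (hypothetical) call:

// Hypothetical usage: block until every embedded broker has metadata for partition 0 of "test-topic".
IntegrationTestUtils.waitUntilMetadataIsPropagated(
    Collections.singletonList(kafkaServer),  // the embedded KafkaServer instance(s) under test
    "test-topic",                            // the topic that was just created
    0,                                       // the partition to check
    30000L);                                 // give up after 30 seconds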
 
Example #7
Source File: EmbeddedKafkaServer.java    From atlas with Apache License 2.0
private void startKafka() throws IOException, URISyntaxException {
    String kafkaValue = properties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);

    LOG.info("Starting kafka at {}", kafkaValue);

    URL        kafkaAddress = getURL(kafkaValue);
    Properties brokerConfig = properties;

    brokerConfig.setProperty("broker.id", "1");
    brokerConfig.setProperty("host.name", kafkaAddress.getHost());
    brokerConfig.setProperty("port", String.valueOf(kafkaAddress.getPort()));
    brokerConfig.setProperty("log.dirs", constructDir("kafka").getAbsolutePath());
    brokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));

    List<KafkaMetricsReporter>   metrics          = new ArrayList<>();
    Buffer<KafkaMetricsReporter> metricsReporters = scala.collection.JavaConversions.asScalaBuffer(metrics);

    kafkaServer = new KafkaServer(KafkaConfig.fromProps(brokerConfig), new SystemTime(), Option.apply(this.getClass().getName()), metricsReporters);

    kafkaServer.startup();

    LOG.info("Embedded kafka server started with broker config {}", brokerConfig);
}
 
Example #8
Source File: EmbeddedKafkaCluster.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Start up the Kafka cluster
 */
public void startup() {
  for (int i = 0; i < _ports.size(); i++) {
    Integer port = _ports.get(i);
    File logDir = FileUtils.constructRandomDirectoryInTempDir("kafka-local-" + port);

    Properties properties = new Properties();
    properties.putAll(_baseProperties);
    properties.setProperty("zookeeper.connect", _zkConnection);
    properties.setProperty("broker.id", String.valueOf(i + 1));
    properties.setProperty("host.name", "localhost");
    properties.setProperty("port", Integer.toString(port));
    properties.setProperty("log.dir", logDir.getAbsolutePath());
    properties.setProperty("log.flush.interval.messages", String.valueOf(1));
    properties.setProperty("log.cleaner.enable", Boolean.FALSE.toString()); //to save memory
    properties.setProperty("offsets.topic.num.partitions", "1");

    KafkaServer broker = startBroker(properties);

    _brokerList.add(broker);
    _logDirs.add(logDir);
  }
}
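The matching shutdown is not shown above; a minimal sketch, assuming the same _brokerList and _logDirs fields and that Apache Commons IO is available for directory cleanup:

/**
 * Shut down the Kafka cluster (hypothetical counterpart to startup() above)
 */
public void shutdown() {
  for (KafkaServer broker : _brokerList) {
    broker.shutdown();        // stop the broker
    broker.awaitShutdown();   // wait until shutdown completes
  }
  for (File logDir : _logDirs) {
    // assuming commons-io; the project may use its own FileUtils helper instead
    org.apache.commons.io.FileUtils.deleteQuietly(logDir);
  }
}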
 
Example #9
Source File: KafkaStormIntegrationTest.java    From incubator-retired-pirk with Apache License 2.0
private void startKafka() throws Exception
{
  FileUtils.deleteDirectory(new File(kafkaTmpDir));

  Properties props = new Properties();
  props.setProperty("zookeeper.session.timeout.ms", "100000");
  props.put("advertised.host.name", "localhost");
  props.put("port", 11111);
  // props.put("broker.id", "0");
  props.put("log.dir", kafkaTmpDir);
  props.put("enable.zookeeper", "true");
  props.put("zookeeper.connect", zookeeperLocalCluster.getConnectString());
  KafkaConfig kafkaConfig = KafkaConfig.fromProps(props);
  kafkaLocalBroker = new KafkaServer(kafkaConfig, new SystemTime(), scala.Option.apply("kafkaThread"));
  kafkaLocalBroker.startup();

  zkClient = new ZkClient(zookeeperLocalCluster.getConnectString(), 60000, 60000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperLocalCluster.getConnectString()), false);
  // ZkUtils zkUtils = ZkUtils.apply(zookeeperLocalCluster.getConnectString(), 60000, 60000, false);
  AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
}
 
Example #10
Source File: EmbeddedKafkaServer.java    From twill with Apache License 2.0
private KafkaServer createKafkaServer(KafkaConfig kafkaConfig) {
  return new KafkaServer(kafkaConfig, new Time() {

    @Override
    public long milliseconds() {
      return System.currentTimeMillis();
    }

    @Override
    public long nanoseconds() {
      return System.nanoTime();
    }

    @Override
    public void sleep(long ms) {
      try {
        Thread.sleep(ms);
      } catch (InterruptedException e) {
        Thread.interrupted();
      }
    }
  });
}
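With broker versions whose KafkaServer constructor takes org.apache.kafka.common.utils.Time, the same server can be built without a hand-rolled clock, mirroring the constructor used in Example #19; a sketch under that assumption:

private KafkaServer createKafkaServer(KafkaConfig kafkaConfig) {
  // Same construction using the stock system clock and an empty metrics-reporter list.
  return new KafkaServer(kafkaConfig, Time.SYSTEM, Option.empty(), asScalaBuffer(new ArrayList<>()));
}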
 
Example #11
Source File: MiniKafkaCluster.java    From AthenaX with Apache License 2.0
@Override
public void close() throws IOException {
  for (KafkaServer s : kafkaServer) {
    s.shutdown();
  }
  this.zkServer.close();
  FileUtils.deleteDirectory(tempDir.toFile());
}
 
Example #12
Source File: KafkaComponent.java    From metron with Apache License 2.0
@Override
public void start() {
  // setup Zookeeper
  zookeeperConnectString = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);

  zkClient = new ZkClient(zookeeperConnectString, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);

  // setup Broker
  Properties props = TestUtilsWrapper.createBrokerConfig(0, zookeeperConnectString, brokerPort);
  props.setProperty("zookeeper.connection.timeout.ms", Integer.toString(KAFKA_ZOOKEEPER_TIMEOUT_MS));
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  org.apache.log4j.Level oldLevel = UnitTestHelper.getLog4jLevel(KafkaServer.class);
  UnitTestHelper.setLog4jLevel(KafkaServer.class, org.apache.log4j.Level.OFF);
  // do not proceed until the broker is up
  TestUtilsWrapper.waitUntilBrokerIsRunning(kafkaServer,"Timed out waiting for RunningAsBroker State",100000);

  for(Topic topic : getTopics()) {
    try {
      createTopic(topic.name, topic.numPartitions, KAFKA_PROPAGATE_TIMEOUT_MS);
    } catch (InterruptedException e) {
      throw new RuntimeException("Unable to create topic", e);
    }
  }
  UnitTestHelper.setLog4jLevel(KafkaServer.class, oldLevel);
  if(postStartCallback != null) {
    postStartCallback.apply(this);
  }
}
 
Example #13
Source File: EmbeddedKafka.java    From attic-apex-malhar with Apache License 2.0
public void createTopic(String topic)
{
  AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
 
Example #14
Source File: KafkaComponent.java    From metron with Apache License 2.0
public void waitUntilMetadataIsPropagated(String topic, int numPartitions, long timeOutMS) {
  List<KafkaServer> servers = new ArrayList<>();
  servers.add(kafkaServer);
  for(int part = 0;part < numPartitions;++part) {
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, part, timeOutMS);
  }
}
 
Example #15
Source File: KafkaValidationUtil09IT.java    From datacollector with Apache License 2.0
private String createTopic(ZkUtils zkUtils, int partitionCount, KafkaServer kafkaServer) {
  String topic = UUID.randomUUID().toString();
  TestUtil09.createTopic(zkUtils, topic, partitionCount, 1);
  TestUtils.waitUntilMetadataIsPropagated(
    scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)), topic, 0, 3000);
  return topic;
}
 
Example #16
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
private void stopBroker(int brokerId) {
  if (brokersById.containsKey(brokerId)) {
    KafkaServer broker = brokersById.get(brokerId);
    broker.shutdown();
    broker.awaitShutdown();
    CoreUtils.delete(broker.config().logDirs());
    brokersById.remove(brokerId);
  }
}
 
Example #17
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
private void startBroker(int brokerId, String zkConnectString) throws IOException {
  if (brokerId < 0) {
    throw new IllegalArgumentException("broker id must not be negative");
  }

  Properties props = TestUtils
      .createBrokerConfig(
          brokerId,
          zkConnectString,
          ENABLE_CONTROLLED_SHUTDOWN,
          ENABLE_DELETE_TOPIC,
          0,
          INTER_BROKER_SECURITY_PROTOCOL,
          this.brokerTrustStoreFile,
          this.brokerSaslProperties,
          ENABLE_PLAINTEXT,
          ENABLE_SASL_PLAINTEXT,
          SASL_PLAINTEXT_PORT,
          ENABLE_SSL,
          SSL_PORT,
          this.enableSASLSSL,
          0,
          Option.<String>empty(),
          1,
          false,
          NUM_PARTITIONS,
          DEFAULT_REPLICATION_FACTOR
      );

  KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
  brokersById.put(brokerId, broker);
}
 
Example #18
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static void waitForTopicPartitions(final List<KafkaServer> servers,
                                          final List<TopicPartition> partitions,
                                          final long timeout) throws InterruptedException {
  final long end = System.currentTimeMillis() + timeout;
  for (final TopicPartition partition : partitions) {
    final long remaining = end - System.currentTimeMillis();
    if (remaining <= 0) {
      throw new AssertionError("timed out while waiting for partitions to become available. Timeout=" + timeout);
    }
    waitUntilMetadataIsPropagated(servers, partition.topic(), partition.partition(), remaining);
  }
}
 
Example #19
Source File: EmbeddedKafka.java    From kafka-pubsub-emulator with Apache License 2.0
public EmbeddedKafka create(Integer nodeId) throws Exception {
  int port = INIT_PORT + nodeId;
  String logDir = temporaryFolder.newFolder().getAbsolutePath();

  KafkaConfig kafkaConfig = getKafkaConfig(nodeId, port, logDir);

  kafkaServer =
      new KafkaServer(kafkaConfig, Time.SYSTEM, Option.empty(), asScalaBuffer(new ArrayList<>()));
  kafkaServer.startup();
  return this;
}
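The getKafkaConfig helper is defined elsewhere in that project; a hypothetical sketch of what such a helper can look like (the property names are standard broker settings, the zkConnect field is assumed):

private KafkaConfig getKafkaConfig(Integer nodeId, int port, String logDir) {
  // Illustrative only; the real helper may set different or additional properties.
  Properties props = new Properties();
  props.setProperty("broker.id", String.valueOf(nodeId));
  props.setProperty("listeners", "PLAINTEXT://localhost:" + port);
  props.setProperty("log.dirs", logDir);
  props.setProperty("zookeeper.connect", zkConnect);            // assumed field holding the ZooKeeper address
  props.setProperty("offsets.topic.replication.factor", "1");   // single-broker friendly default
  props.setProperty("log.flush.interval.messages", "1");        // flush eagerly in tests
  return KafkaConfig.fromProps(props);
}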
 
Example #20
Source File: TestKafkaBroker.java    From ignite with Apache License 2.0
/**
 * Creates a topic.
 *
 * @param topic Topic name.
 * @param partitions Number of partitions for the topic.
 * @param replicationFactor Replication factor.
 * @throws TimeoutException If the operation times out.
 * @throws InterruptedException If interrupted.
 */
public void createTopic(String topic, int partitions, int replicationFactor)
    throws TimeoutException, InterruptedException {
    List<KafkaServer> servers = new ArrayList<>();

    servers.add(kafkaSrv);

    KafkaZkClient client = kafkaSrv.zkClient();

    TestUtils.createTopic(client, topic, partitions, replicationFactor,
        scala.collection.JavaConversions.asScalaBuffer(servers), new Properties());
}
 
Example #21
Source File: ZookeeperBrokersTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "4"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example #22
Source File: TestUtil09.java    From datacollector with Apache License 2.0
public static KafkaServer createKafkaServer(int port, String zkConnect) {
  return createKafkaServer(port, zkConnect, true);
}
 
Example #23
Source File: ZookeeperHostsTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example #24
Source File: KafkaMessageNewReceiverTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {

    }
}
 
Example #25
Source File: KafkaMessageNewSenderTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {


        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "4"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example #26
Source File: NewReceiverWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example #27
Source File: NewSenderWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {


        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);

    } catch (Exception e) {
    }
}
 
Example #28
Source File: MiniKafkaCluster.java    From AthenaX with Apache License 2.0
public List<KafkaServer> getKafkaServer() {
  return kafkaServer;
}
 
Example #29
Source File: KafkaBrokerTestHarness.java    From common-kafka with Apache License 2.0
private static KafkaServer startBroker(KafkaConfig config) {
    KafkaServer server = new KafkaServer(config, Time.SYSTEM, Option.empty(),
            new scala.collection.mutable.MutableList<>());
    server.startup();
    return server;
}
 
Example #30
Source File: KafkaMessageReceiverImplTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        Properties kafkaProps2 = TestUtils.createBrokerConfig(brokerId + 1,
                zkConnect, false, false, (port - 1),
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps2.setProperty("auto.create.topics.enable", "true");
        kafkaProps2.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps2.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps2.setProperty("host.name", "localhost");
        kafkaProps2.setProperty("port", (port - 1) + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        KafkaConfig config2 = new KafkaConfig(kafkaProps2);

        Time mock = new SystemTime();
        Time mock2 = new SystemTime();

        kafkaServer = TestUtils.createServer(config, mock);
        KafkaServer kafkaServer2 = TestUtils.createServer(config2, mock2);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "2", "--partitions", "2"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        servers.add(kafkaServer2);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}