Java Code Examples for kafka.utils.TestUtils

The following examples show how to use kafka.utils.TestUtils. These examples are extracted from open source projects; the original project, source file, and license are listed above each example.
Example 1
Source Project: kareldb   Source File: ClusterTestHarness.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
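
The original snippet stops at setUp; the brokers and ZooKeeper instance started here also need to be released after each test. The following tearDown is not part of the kareldb source, just a minimal sketch assuming the servers and zookeeper fields created above:

@After
public void tearDown() throws Exception {
    // Stop each broker started in setUp and wait for shutdown to complete.
    for (KafkaServer server : servers) {
        server.shutdown();
        server.awaitShutdown();
    }
    // Stop the embedded ZooKeeper instance last.
    zookeeper.shutdown();
}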
 
Example 2
Source Project: kcache   Source File: SASLClusterTestHarness.java    License: Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<File> trustStoreFileOption = Option.apply(null);
    final Option<SecurityProtocol> saslInterBrokerSecurityProtocol =
        Option.apply(SecurityProtocol.SASL_PLAINTEXT);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(), saslInterBrokerSecurityProtocol,
        trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, true, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(), Option.<String>empty(), 1, false, 1, (short) 1);

    injectProperties(props);
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    props.setProperty("sasl.mechanism.inter.broker.protocol", "GSSAPI");
    props.setProperty(SaslConfigs.SASL_ENABLED_MECHANISMS, "GSSAPI");

    return KafkaConfig.fromProps(props);
}
 
Example 3
Source Project: kcache   Source File: ClusterTestHarness.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
 
Example 4
@Test
public void test_1() throws Exception {

    KafkaMessageSenderPool<byte[], byte[]> pool1 = new KafkaMessageSenderPool<byte[], byte[]>();

    pool1.getSender();

    pool1.setConfig(new DefaultResourceLoader()
            .getResource("kafka/producer.properties"));

    pool1.setProps(TestUtils.getProducerConfig("localhost:" + port));
    pool1.setPoolSize(2);
    pool1.init();
    pool1.destroy();

    KafkaMessageSenderPool<byte[], byte[]> pool2 = new KafkaMessageSenderPool<byte[], byte[]>();

    pool2.setConfig(new DefaultResourceLoader()
            .getResource("kafka/producer.properties"));

    pool2.setProps(TestUtils.getProducerConfig("localhost:" + port));
    pool2.setPoolSize(0);
    pool2.init();
    pool2.destroy();

}
 
Example 5
Source Project: samza   Source File: TestStreamProcessor.java    License: Apache License 2.0
private void initProducer(String bootstrapServer) {
  producer = TestUtils.createProducer(
      bootstrapServer,
      1,
      60 * 1000L,
      1024L * 1024L,
      0,
      0L,
      5 * 1000L,
      SecurityProtocol.PLAINTEXT,
      null,
      Option$.MODULE$.<Properties>apply(new Properties()),
      new StringSerializer(),
      new ByteArraySerializer(),
      Option$.MODULE$.<Properties>apply(new Properties()));
}
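
The producer created here is an ordinary org.apache.kafka.clients.producer.KafkaProducer with String keys and byte[] values, so a test uses it like any other producer. A hypothetical usage sketch (topic name and payload are illustrative, not from the Samza test):

// Send one keyed record and block until the embedded broker acknowledges it.
producer.send(new ProducerRecord<>("input-topic", "key-1",
    "hello".getBytes(StandardCharsets.UTF_8))).get();
producer.flush();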
 
Example 6
Source Project: attic-apex-malhar   Source File: EmbeddedKafka.java    License: Apache License 2.0
public void publish(String topic, List<String> messages)
{
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

  try (KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps)) {
    for (String message : messages) {
      ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, message.getBytes(StandardCharsets.UTF_8));
      producer.send(data);
    }
  }

  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
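
Publishing is only half of what a test harness needs; reading the messages back uses the standard KafkaConsumer API against the same embedded broker. The method below is not from the attic-apex-malhar source, just a sketch with an illustrative group id, assuming a pre-2.0 client where poll(long) is available:

public List<String> consume(String topic, int expectedCount)
{
  Properties consumerProps = new Properties();
  consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
  consumerProps.setProperty("group.id", "embedded-kafka-test");
  consumerProps.setProperty("auto.offset.reset", "earliest");
  consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
  consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

  List<String> messages = new ArrayList<>();
  try (KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps)) {
    consumer.subscribe(Collections.singletonList(topic));
    // Keep polling until the expected number of messages has been read.
    while (messages.size() < expectedCount) {
      for (ConsumerRecord<Integer, byte[]> record : consumer.poll(1000)) {
        messages.add(new String(record.value(), StandardCharsets.UTF_8));
      }
    }
  }
  return messages;
}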
 
Example 7
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 8
Source Project: incubator-samoa   Source File: KafkaUtilsTest.java    License: Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 9
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 10
Source Project: datacollector   Source File: KafkaProducer09IT.java    License: Apache License 2.0
@BeforeClass
public static void setUpClass() throws Exception {
  int zkConnectionTimeout = 6000;
  int zkSessionTimeout = 6000;

  zookeeper = new EmbeddedZookeeper();
  zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
  zkUtils = ZkUtils.apply(
      zkConnect, zkSessionTimeout, zkConnectionTimeout,
      JaasUtils.isZkSecurityEnabled());

  port = NetworkUtils.getRandomPort();
  kafkaServer = TestUtil09.createKafkaServer(port, zkConnect);
  for (int i = 0; i < topics.length; i++) {
    topics[i] = UUID.randomUUID().toString();
    AdminUtils.createTopic(zkUtils, topics[i], 1, 1, new Properties());

    TestUtils.waitUntilMetadataIsPropagated(
        scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)),
        topics[i], 0, 5000);
  }
}
 
Example 11
Source Project: rya   Source File: EmbeddedKafkaInstance.java    License: Apache License 2.0
/**
 * Starts the Embedded Kafka and Zookeeper Servers.
 * @throws Exception If an exception occurs during startup.
 */
protected void startup() throws Exception {
    // Setup the embedded zookeeper
    logger.info("Starting up Embedded Zookeeper...");
    zkServer = new EmbeddedZookeeper();
    zookeperConnect = ZKHOST + ":" + zkServer.port();
    logger.info("Embedded Zookeeper started at: {}", zookeperConnect);

    // setup Broker
    logger.info("Starting up Embedded Kafka...");
    brokerPort = Integer.toString(PortUtils.getRandomFreePort());
    final Properties brokerProps = new Properties();
    brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0");
    brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST);
    brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort);
    brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect);
    brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString());
    brokerProps.setProperty(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), "true");
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort);
}
 
Example 12
Source Project: rya   Source File: KafkaExportITBase.java    License: Apache License 2.0
/**
 * setup mini kafka and call the super to setup mini fluo
 */
@Before
public void setupKafka() throws Exception {
    // Install an instance of Rya on the Accumulo cluster.
    installRyaInstance();

    // Setup Kafka.
    zkServer = new EmbeddedZookeeper();
    final String zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    final Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
 
Example 13
Source Project: incubator-gobblin   Source File: KafkaTestBase.java    License: Apache License 2.0
public static void startServer() throws RuntimeException {
  if (serverStarted && serverClosed) {
    throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
  }

  if (!serverStarted) {
    serverStarted = true;
    zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);

    kafkaPort = TestUtils.choosePort();
    Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);

    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
  }
}
 
Example 14
Source Project: kareldb   Source File: ClusterTestHarness.java    License: Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example 15
/**
 * Creates and starts an embedded Kafka broker.
 *
 * @param config Broker configuration settings.  Used to modify, for example, the listeners
 *               the broker should use.  Note that you cannot change some settings such as
 *               `log.dirs`.
 */
public KafkaEmbedded(final Properties config) throws IOException {
  this.tmpFolder = new TemporaryFolder();
  this.tmpFolder.create();
  this.logDir = tmpFolder.newFolder();
  this.effectiveConfig = effectiveConfigFrom(config, logDir);

  final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, true);
  log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
      logDir, zookeeperConnect());

  kafka = TestUtils.createServer(kafkaConfig, new SystemTime());
  log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
      brokerList(), zookeeperConnect());
}
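
A broker started this way should also be stopped, and its temporary log directory removed, when the surrounding test finishes. A minimal stop method sketch, assuming the kafka, logDir and tmpFolder fields from the constructor above:

public void stop() {
  log.debug("Shutting down embedded Kafka broker (with log.dirs={}) ...", logDir);
  // Shut the broker down and wait for it to finish before deleting its data.
  kafka.shutdown();
  kafka.awaitShutdown();
  // Remove the temporary folder that holds the broker's log directory.
  tmpFolder.delete();
  log.debug("Shutdown of embedded Kafka broker completed");
}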
 
Example 16
Source Project: kcache   Source File: SSLClusterTestHarness.java    License: Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    File trustStoreFile;
    try {
        trustStoreFile = File.createTempFile("SSLClusterTestHarness-truststore", ".jks");
    } catch (IOException ioe) {
        throw new RuntimeException("Unable to create temporary file for the truststore.");
    }
    final Option<File> trustStoreFileOption = Option.apply(trustStoreFile);
    final Option<SecurityProtocol> sslInterBrokerSecurityProtocol = Option.apply(SecurityProtocol.SSL);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(), sslInterBrokerSecurityProtocol,
        trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, false, TestUtils.RandomPort(),
        true, TestUtils.RandomPort(), false, TestUtils.RandomPort(), Option.<String>empty(), 1, false,
        1, (short) 1);

    // setup client SSL. Needs to happen before the broker is initialized, because the client's cert
    // needs to be added to the broker's trust store.
    try {
        this.clientSslConfigs = TestSslUtils.createSslConfig(true, true, Mode.CLIENT,
            trustStoreFile, "client", "localhost");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    injectProperties(props);
    if (requireSSLClientAuth()) {
        props.setProperty("ssl.client.auth", "required");
    }

    return KafkaConfig.fromProps(props);
}
 
Example 17
Source Project: kcache   Source File: ClusterTestHarness.java    License: Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example 18
Source Project: common-docker   Source File: EmbeddedKafkaCluster.java    License: Apache License 2.0
private String createKeytab(String principal) {

    File keytabFile = TestUtils.tempFile();

    List<String> principals = new ArrayList<>();
    principals.add(principal);
    kdc.createPrincipal(
        keytabFile,
        JavaConverters.asScalaBuffer(principals).toList()
    );

    log.debug("Keytab file for " + principal + " : " + keytabFile.getAbsolutePath());
    return keytabFile.getAbsolutePath();
}
 
Example 19
Source Project: common-docker   Source File: EmbeddedKafkaCluster.java    License: Apache License 2.0
public Properties getClientSecurityConfig() {
  if (enableSASLSSL) {
    Properties clientSecurityProps = TestUtils.producerSecurityConfigs(
        SecurityProtocol.SASL_SSL,
        Option.apply(trustStoreFile),
        Option.apply(saslProperties)
    );

    return clientSecurityProps;
  } else {
    return new Properties();
  }
}
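
These properties cover only the security settings; a client still needs bootstrap servers and serializers before it can be created. A hypothetical usage sketch (cluster and bootstrapServers are placeholders supplied by the test, not part of the original class):

Properties producerProps = new Properties();
producerProps.putAll(cluster.getClientSecurityConfig());  // SASL_SSL settings when enabled
producerProps.put("bootstrap.servers", bootstrapServers);
producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps);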
 
Example 20
Source Project: common-docker   Source File: EmbeddedKafkaCluster.java    License: Apache License 2.0
private void startBroker(int brokerId, String zkConnectString) throws IOException {
  if (brokerId < 0) {
    throw new IllegalArgumentException("broker id must not be negative");
  }

  Properties props = TestUtils
      .createBrokerConfig(
          brokerId,
          zkConnectString,
          ENABLE_CONTROLLED_SHUTDOWN,
          ENABLE_DELETE_TOPIC,
          0,
          INTER_BROKER_SECURITY_PROTOCOL,
          this.brokerTrustStoreFile,
          this.brokerSaslProperties,
          ENABLE_PLAINTEXT,
          ENABLE_SASL_PLAINTEXT,
          SASL_PLAINTEXT_PORT,
          ENABLE_SSL,
          SSL_PORT,
          this.enableSASLSSL,
          0,
          Option.<String>empty(),
          1,
          false,
          NUM_PARTITIONS,
          DEFAULT_REPLICATION_FACTOR
      );

  KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
  brokersById.put(brokerId, broker);
}
 
Example 21
private void createTopic(String topic) {
    final ZkClient zkClient = new ZkClient(zookeeper.getConnectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    final ZkConnection connection = new ZkConnection(zookeeper.getConnectString());
    final ZkUtils zkUtils = new ZkUtils(zkClient, connection, false);
    AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    TestUtils.waitUntilMetadataIsPropagated(JavaConversions.asScalaBuffer(Lists.newArrayList(kafkaServer)), topic, 0, 10000);
    zkClient.close();
}
 
Example 22
Source Project: samza   Source File: TestStreamProcessor.java    License: Apache License 2.0
private void initConsumer(String bootstrapServer) {
  consumer = TestUtils.createConsumer(
      bootstrapServer,
      "group",
      "earliest",
      4096L,
      "org.apache.kafka.clients.consumer.RangeAssignor",
      30000,
      SecurityProtocol.PLAINTEXT,
      Option$.MODULE$.<File>empty(),
      Option$.MODULE$.<Properties>empty(),
      new StringDeserializer(),
      new ByteArrayDeserializer(),
      Option$.MODULE$.<Properties>empty());
}
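
The consumer created here deserializes String keys and byte[] values; after initConsumer a test typically subscribes and polls. A short hypothetical usage sketch (topic name is illustrative; poll(Duration) assumes a 2.x client):

consumer.subscribe(Collections.singletonList("output-topic"));
ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofSeconds(5));
for (ConsumerRecord<String, byte[]> record : records) {
  System.out.printf("offset=%d value=%s%n", record.offset(),
      new String(record.value(), StandardCharsets.UTF_8));
}
consumer.close();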
 
Example 23
Source Project: attic-apex-malhar   Source File: EmbeddedKafka.java    License: Apache License 2.0
public void start() throws IOException
{
  // Find port
  try {
    ServerSocket serverSocket = new ServerSocket(0);
    BROKERPORT = Integer.toString(serverSocket.getLocalPort());
    serverSocket.close();
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }

  // Setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = BROKERHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  zkUtils = ZkUtils.apply(zkClient, false);

  // Setup brokers
  cleanupDir();
  Properties props = new Properties();
  props.setProperty("zookeeper.connect", zkConnect);
  props.setProperty("broker.id", "0");
  props.setProperty("log.dirs", KAFKA_PATH);
  props.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
}
 
Example 24
Source Project: attic-apex-malhar   Source File: EmbeddedKafka.java    License: Apache License 2.0
public void createTopic(String topic)
{
  AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
 
Example 25
Source Project: ignite   Source File: TestKafkaBroker.java    License: Apache License 2.0
/**
 * Creates a topic.
 *
 * @param topic Topic name.
 * @param partitions Number of partitions for the topic.
 * @param replicationFactor Replication factor.
 * @throws TimeoutException If operation is timed out.
 * @throws InterruptedException If interrupted.
 */
public void createTopic(String topic, int partitions, int replicationFactor)
    throws TimeoutException, InterruptedException {
    List<KafkaServer> servers = new ArrayList<>();

    servers.add(kafkaSrv);

    KafkaZkClient client = kafkaSrv.zkClient();

    TestUtils.createTopic(client, topic, partitions, replicationFactor,
        scala.collection.JavaConversions.asScalaBuffer(servers), new Properties());
}
 
Example 26
Source Project: ignite   Source File: TestKafkaBroker.java    License: Apache License 2.0
/**
 * Sets up test Kafka broker.
 *
 * @throws IOException If failed.
 */
private void setupKafkaServer() throws IOException {
    kafkaCfg = new KafkaConfig(getKafkaConfig());

    kafkaSrv = TestUtils.createServer(kafkaCfg, new SystemTime());

    kafkaSrv.startup();
}
 
Example 27
Source Project: datacollector   Source File: KafkaValidationUtil09IT.java    License: Apache License 2.0
private String createTopic(ZkUtils zkUtils, int partitionCount, KafkaServer kafkaServer) {
  String topic = UUID.randomUUID().toString();
  TestUtil09.createTopic(zkUtils, topic, partitionCount, 1);
  TestUtils.waitUntilMetadataIsPropagated(
    scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)), topic, 0, 3000);
  return topic;
}
 
Example 28
Source Project: datacollector   Source File: TestUtil09.java    License: Apache License 2.0
public static Properties createKafkaConfig(int port, String zkConnect, boolean autoCreateTopic, int numPartitions) {
  final Option<File> noFile = scala.Option.apply(null);
  final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
  Properties props = TestUtils.createBrokerConfig(
    0, zkConnect, false, false, port, noInterBrokerSecurityProtocol,
    noFile, true, false, TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
    TestUtils.RandomPort());
  props.setProperty("auto.create.topics.enable", String.valueOf(autoCreateTopic));
  props.setProperty("num.partitions", String.valueOf(numPartitions));
  props.setProperty("message.max.bytes", "500");
  return props;
}
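
The Properties returned here still have to be turned into a running broker. A minimal sketch of typical usage (port and ZooKeeper address are illustrative, not from the datacollector source):

Properties brokerProps = createKafkaConfig(9092, "127.0.0.1:2181", true, 1);
KafkaConfig config = KafkaConfig.fromProps(brokerProps);
KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());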
 
Example 29
Source Project: metron   Source File: KafkaComponent.java    License: Apache License 2.0
public KafkaComponent withBrokerPort(int brokerPort) {
  if(brokerPort <= 0)
  {
    brokerPort = TestUtils.RandomPort();
  }

  this.brokerPort = brokerPort;
  return this;
}
 
Example 30
Source Project: metron   Source File: KafkaComponent.java    License: Apache License 2.0
@Override
public void start() {
  // setup Zookeeper
  zookeeperConnectString = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);

  zkClient = new ZkClient(zookeeperConnectString, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);

  // setup Broker
  Properties props = TestUtilsWrapper.createBrokerConfig(0, zookeeperConnectString, brokerPort);
  props.setProperty("zookeeper.connection.timeout.ms", Integer.toString(KAFKA_ZOOKEEPER_TIMEOUT_MS));
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  org.apache.log4j.Level oldLevel = UnitTestHelper.getLog4jLevel(KafkaServer.class);
  UnitTestHelper.setLog4jLevel(KafkaServer.class, org.apache.log4j.Level.OFF);
  // do not proceed until the broker is up
  TestUtilsWrapper.waitUntilBrokerIsRunning(kafkaServer, "Timed out waiting for RunningAsBroker State", 100000);

  for(Topic topic : getTopics()) {
    try {
      createTopic(topic.name, topic.numPartitions, KAFKA_PROPAGATE_TIMEOUT_MS);
    } catch (InterruptedException e) {
      throw new RuntimeException("Unable to create topic", e);
    }
  }
  UnitTestHelper.setLog4jLevel(KafkaServer.class, oldLevel);
  if(postStartCallback != null) {
    postStartCallback.apply(this);
  }
}