kafka.utils.TestUtils Java Examples

The following examples show how to use kafka.utils.TestUtils. Each example comes from an open-source project; the source file, project, and license are listed in the heading above it.
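
Most of the examples below follow the same basic recipe: start an embedded ZooKeeper, build a broker Properties object that points at it, and hand the resulting KafkaConfig to TestUtils.createServer. The condensed sketch below shows only that recipe; package locations (for example kafka.zk.EmbeddedZookeeper and kafka.utils.MockTime) and the exact TestUtils signatures differ across Kafka versions, so treat it as an illustration rather than a drop-in snippet.

import java.nio.file.Files;
import java.util.Properties;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.TestUtils;
import kafka.zk.EmbeddedZookeeper;

public class EmbeddedKafkaSketch {

    public static void main(String[] args) throws Exception {
        // Start an in-process ZooKeeper and point the broker config at it.
        EmbeddedZookeeper zkServer = new EmbeddedZookeeper();
        String zkConnect = "127.0.0.1:" + zkServer.port();

        Properties brokerProps = new Properties();
        brokerProps.setProperty("zookeeper.connect", zkConnect);
        brokerProps.setProperty("broker.id", "0");
        brokerProps.setProperty("log.dirs",
            Files.createTempDirectory("kafka-").toAbsolutePath().toString());
        // A fixed port keeps the sketch short; real tests usually pick a free port.
        brokerProps.setProperty("listeners", "PLAINTEXT://127.0.0.1:9092");

        // TestUtils builds and starts the broker.
        KafkaServer kafkaServer =
            TestUtils.createServer(new KafkaConfig(brokerProps), new MockTime());

        // ... exercise the broker at 127.0.0.1:9092 ...

        kafkaServer.shutdown();
        zkServer.shutdown();
    }
}
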
Example #1
Source File: SASLClusterTestHarness.java    From kcache with Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<File> trustStoreFileOption = Option.apply(null);
    final Option<SecurityProtocol> saslInterBrokerSecurityProtocol =
        Option.apply(SecurityProtocol.SASL_PLAINTEXT);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(), saslInterBrokerSecurityProtocol,
        trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, true, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(), Option.<String>empty(), 1, false, 1, (short) 1);

    injectProperties(props);
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    props.setProperty("sasl.mechanism.inter.broker.protocol", "GSSAPI");
    props.setProperty(SaslConfigs.SASL_ENABLED_MECHANISMS, "GSSAPI");

    return KafkaConfig.fromProps(props);
}
 
Example #2
Source File: ClusterTestHarness.java    From kareldb with Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
 
Example #3
Source File: KafkaMessageSenderPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Test
public void test_1() throws Exception {

    KafkaMessageSenderPool<byte[], byte[]> pool1 = new KafkaMessageSenderPool<byte[], byte[]>();

    pool1.getSender();

    pool1.setConfig(new DefaultResourceLoader()
            .getResource("kafka/producer.properties"));

    pool1.setProps(TestUtils.getProducerConfig("localhost:" + port));
    pool1.setPoolSize(2);
    pool1.init();
    pool1.destroy();

    KafkaMessageSenderPool<byte[], byte[]> pool2 = new KafkaMessageSenderPool<byte[], byte[]>();

    pool2.setConfig(new DefaultResourceLoader()
            .getResource("kafka/producer.properties"));

    pool2.setProps(TestUtils.getProducerConfig("localhost:" + port));
    pool2.setPoolSize(0);
    pool2.init();
    pool2.destroy();

}
 
Example #4
Source File: KafkaProducer09IT.java    From datacollector with Apache License 2.0
@BeforeClass
public static void setUpClass() throws Exception {
  int zkConnectionTimeout = 6000;
  int zkSessionTimeout = 6000;

  zookeeper = new EmbeddedZookeeper();
  zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
  zkUtils = ZkUtils.apply(
      zkConnect, zkSessionTimeout, zkConnectionTimeout,
      JaasUtils.isZkSecurityEnabled());

  port = NetworkUtils.getRandomPort();
  kafkaServer = TestUtil09.createKafkaServer(port, zkConnect);
  for (int i = 0; i < topics.length; i++) {
    topics[i] = UUID.randomUUID().toString();
    AdminUtils.createTopic(zkUtils, topics[i], 1, 1, new Properties());

    TestUtils.waitUntilMetadataIsPropagated(
        scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)),
        topics[i], 0, 5000);
  }
}
 
Example #5
Source File: KafkaDestinationProcessorTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example #6
Source File: KafkaUtilsTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example #7
Source File: EmbeddedKafkaInstance.java    From rya with Apache License 2.0
/**
 * Starts the Embedded Kafka and Zookeeper Servers.
 * @throws Exception - If an exception occurs during startup.
 */
protected void startup() throws Exception {
    // Setup the embedded zookeeper
    logger.info("Starting up Embedded Zookeeper...");
    zkServer = new EmbeddedZookeeper();
    zookeperConnect = ZKHOST + ":" + zkServer.port();
    logger.info("Embedded Zookeeper started at: {}", zookeperConnect);

    // setup Broker
    logger.info("Starting up Embedded Kafka...");
    brokerPort = Integer.toString(PortUtils.getRandomFreePort());
    final Properties brokerProps = new Properties();
    brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0");
    brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST);
    brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort);
    brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect);
    brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString());
    brokerProps.setProperty(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), "true");
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort);
}
 
Example #8
Source File: KafkaEntranceProcessorTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example #9
Source File: KafkaExportITBase.java    From rya with Apache License 2.0
/**
 * Sets up mini Kafka and calls the super class to set up mini Fluo.
 */
@Before
public void setupKafka() throws Exception {
    // Install an instance of Rya on the Accumulo cluster.
    installRyaInstance();

    // Setup Kafka.
    zkServer = new EmbeddedZookeeper();
    final String zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    final Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
 
Example #10
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
public static void startServer() throws RuntimeException {
  if (serverStarted && serverClosed) {
    throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
  }

  if (!serverStarted) {
    serverStarted = true;
    zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);

    kafkaPort = TestUtils.choosePort();
    Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);

    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
  }
}
 
Example #11
Source File: EmbeddedKafka.java    From attic-apex-malhar with Apache License 2.0
public void publish(String topic, List<String> messages)
{
  Properties producerProps = new Properties();
  producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
  producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
  producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

  try (KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps)) {
    for (String message : messages) {
      ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, message.getBytes(StandardCharsets.UTF_8));
      producer.send(data);
    }
  }

  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
 
Example #12
Source File: ClusterTestHarness.java    From kcache with Apache License 2.0
@Before
public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("localhost:%d", zookeeper.port());

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
        KafkaConfig config = getKafkaConfig(i);
        configs.add(config);

        KafkaServer server = TestUtils.createServer(config, Time.SYSTEM);
        servers.add(server);
    }

    String[] serverUrls = new String[servers.size()];
    ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol());
    for (int i = 0; i < servers.size(); i++) {
        serverUrls[i] =
            Utils.formatAddress(
                servers.get(i).config().advertisedListeners().head().host(),
                servers.get(i).boundPort(listenerType)
            );
    }
    bootstrapServers = Utils.join(serverUrls, ",");
}
 
Example #13
Source File: TestStreamProcessor.java    From samza with Apache License 2.0
private void initProducer(String bootstrapServer) {
  producer = TestUtils.createProducer(
      bootstrapServer,
      1,
      60 * 1000L,
      1024L * 1024L,
      0,
      0L,
      5 * 1000L,
      SecurityProtocol.PLAINTEXT,
      null,
      Option$.MODULE$.<Properties>apply(new Properties()),
      new StringSerializer(),
      new ByteArraySerializer(),
      Option$.MODULE$.<Properties>apply(new Properties()));
}
 
Example #14
Source File: KafkaEmbedded.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Creates and starts an embedded Kafka broker.
 *
 * @param config Broker configuration settings.  Used to modify, for example, the listeners
 *               the broker should use.  Note that you cannot change some settings such as
 *               `log.dirs`.
 */
public KafkaEmbedded(final Properties config) throws IOException {
  this.tmpFolder = new TemporaryFolder();
  this.tmpFolder.create();
  this.logDir = tmpFolder.newFolder();
  this.effectiveConfig = effectiveConfigFrom(config, logDir);

  final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, true);
  log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
      logDir, zookeeperConnect());

  kafka = TestUtils.createServer(kafkaConfig, new SystemTime());
  log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
      brokerList(), zookeeperConnect());
}
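
For orientation only, one hypothetical way the helper above might be used. Apart from the constructor, the names here are assumptions inferred from the log statements in the constructor, not confirmed API, and the configuration keys shown are illustrative.

// Hypothetical usage of the KafkaEmbedded helper above; assumes a ZooKeeper
// ensemble is already running at the supplied zookeeper.connect address.
Properties config = new Properties();
config.setProperty("zookeeper.connect", "127.0.0.1:2181"); // assumed required setting
config.setProperty("listeners", "PLAINTEXT://127.0.0.1:0"); // port 0 lets the broker pick a free port
KafkaEmbedded broker = new KafkaEmbedded(config);
// brokerList() and zookeeperConnect(), seen in the log statements above, would
// supply bootstrap.servers and zookeeper.connect values for test clients.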
 
Example #15
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
private String createKeytab(String principal) {

    File keytabFile = TestUtils.tempFile();

    List<String> principals = new ArrayList<>();
    principals.add(principal);
    kdc.createPrincipal(
        keytabFile,
        JavaConverters.asScalaBuffer(principals).toList()
    );

    log.debug("Keytab file for " + principal + " : " + keytabFile.getAbsolutePath());
    return keytabFile.getAbsolutePath();
}
 
Example #16
Source File: KafkaComponent.java    From metron with Apache License 2.0
public void waitUntilMetadataIsPropagated(String topic, int numPartitions, long timeOutMS) {
  List<KafkaServer> servers = new ArrayList<>();
  servers.add(kafkaServer);
  for(int part = 0;part < numPartitions;++part) {
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, part, timeOutMS);
  }
}
 
Example #17
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {

    startServer();

    this.topic = topic;

    AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());

    List<KafkaServer> servers = new ArrayList<>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    consumeProps.put("group.id", "testConsumer");
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    consumeProps.put("consumer.timeout.ms", "10000");

    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(this.topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);

    iterator = stream.iterator();
}
 
Example #18
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
public Properties getClientSecurityConfig() {
  if (enableSASLSSL) {
    Properties clientSecurityProps = TestUtils.producerSecurityConfigs(
        SecurityProtocol.SASL_SSL,
        Option.apply(trustStoreFile),
        Option.apply(saslProperties)
    );

    return clientSecurityProps;
  } else {
    return new Properties();
  }
}
 
Example #19
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
private void startBroker(int brokerId, String zkConnectString) throws IOException {
  if (brokerId < 0) {
    throw new IllegalArgumentException("broker id must not be negative");
  }

  Properties props = TestUtils
      .createBrokerConfig(
          brokerId,
          zkConnectString,
          ENABLE_CONTROLLED_SHUTDOWN,
          ENABLE_DELETE_TOPIC,
          0,
          INTER_BROKER_SECURITY_PROTOCOL,
          this.brokerTrustStoreFile,
          this.brokerSaslProperties,
          ENABLE_PLAINTEXT,
          ENABLE_SASL_PLAINTEXT,
          SASL_PLAINTEXT_PORT,
          ENABLE_SSL,
          SSL_PORT,
          this.enableSASLSSL,
          0,
          Option.<String>empty(),
          1,
          false,
          NUM_PARTITIONS,
          DEFAULT_REPLICATION_FACTOR
      );

  KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
  brokersById.put(brokerId, broker);
}
 
Example #20
Source File: ClusterTestHarness.java    From kcache with Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example #21
Source File: SSLClusterTestHarness.java    From kcache with Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    File trustStoreFile;
    try {
        trustStoreFile = File.createTempFile("SSLClusterTestHarness-truststore", ".jks");
    } catch (IOException ioe) {
        throw new RuntimeException("Unable to create temporary file for the truststore.");
    }
    final Option<File> trustStoreFileOption = Option.apply(trustStoreFile);
    final Option<SecurityProtocol> sslInterBrokerSecurityProtocol = Option.apply(SecurityProtocol.SSL);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(), sslInterBrokerSecurityProtocol,
        trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, false, TestUtils.RandomPort(),
        true, TestUtils.RandomPort(), false, TestUtils.RandomPort(), Option.<String>empty(), 1, false,
        1, (short) 1);

    // setup client SSL. Needs to happen before the broker is initialized, because the client's cert
    // needs to be added to the broker's trust store.
    Map<String, Object> sslConfigs;
    try {
        this.clientSslConfigs = TestSslUtils.createSslConfig(true, true, Mode.CLIENT,
            trustStoreFile, "client", "localhost");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    injectProperties(props);
    if (requireSSLClientAuth()) {
        props.setProperty("ssl.client.auth", "required");
    }

    return KafkaConfig.fromProps(props);
}
 
Example #22
Source File: ClusterTestHarness.java    From kareldb with Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example #23
Source File: KafkaComponent.java    From metron with Apache License 2.0
public KafkaComponent withBrokerPort(int brokerPort) {
  if(brokerPort <= 0)
  {
    brokerPort = TestUtils.RandomPort();
  }

  this.brokerPort = brokerPort;
  return this;
}
 
Example #24
Source File: TestUtil09.java    From datacollector with Apache License 2.0
public static Properties createKafkaConfig(int port, String zkConnect, boolean autoCreateTopic, int numPartitions) {
  final Option<File> noFile = scala.Option.apply(null);
  final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
  Properties props = TestUtils.createBrokerConfig(
    0, zkConnect, false, false, port, noInterBrokerSecurityProtocol,
    noFile, true, false, TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
    TestUtils.RandomPort());
  props.setProperty("auto.create.topics.enable", String.valueOf(autoCreateTopic));
  props.setProperty("num.partitions", String.valueOf(numPartitions));
  props.setProperty("message.max.bytes", "500");
  return props;
}
 
Example #25
Source File: KafkaValidationUtil09IT.java    From datacollector with Apache License 2.0
private String createTopic(ZkUtils zkUtils, int partitionCount, KafkaServer kafkaServer) {
  String topic = UUID.randomUUID().toString();
  TestUtil09.createTopic(zkUtils, topic, partitionCount, 1);
  TestUtils.waitUntilMetadataIsPropagated(
    scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)), topic, 0, 3000);
  return topic;
}
 
Example #26
Source File: TestKafkaBroker.java    From ignite with Apache License 2.0
/**
 * Sets up test Kafka broker.
 *
 * @throws IOException If failed.
 */
private void setupKafkaServer() throws IOException {
    kafkaCfg = new KafkaConfig(getKafkaConfig());

    kafkaSrv = TestUtils.createServer(kafkaCfg, new SystemTime());

    kafkaSrv.startup();
}
 
Example #27
Source File: TestKafkaBroker.java    From ignite with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a topic.
 *
 * @param topic Topic name.
 * @param partitions Number of partitions for the topic.
 * @param replicationFactor Replication factor.
 * @throws TimeoutException If operation is timed out.
 * @throws InterruptedException If interrupted.
 */
public void createTopic(String topic, int partitions, int replicationFactor)
    throws TimeoutException, InterruptedException {
    List<KafkaServer> servers = new ArrayList<>();

    servers.add(kafkaSrv);

    KafkaZkClient client = kafkaSrv.zkClient();

    TestUtils.createTopic(client, topic, partitions, replicationFactor,
        scala.collection.JavaConversions.asScalaBuffer(servers), new Properties());
}
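
A usage illustration, assuming a started TestKafkaBroker instance; the variable name is hypothetical.

// Creates a two-partition topic with replication factor 1 on the embedded broker.
testKafkaBroker.createTopic("page-visits", 2, 1);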
 
Example #28
Source File: EmbeddedKafka.java    From attic-apex-malhar with Apache License 2.0
public void createTopic(String topic)
{
  AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<KafkaServer>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 30000);
}
 
Example #29
Source File: EmbeddedKafka.java    From attic-apex-malhar with Apache License 2.0
public void start() throws IOException
{
  // Find port
  try {
    ServerSocket serverSocket = new ServerSocket(0);
    BROKERPORT = Integer.toString(serverSocket.getLocalPort());
    serverSocket.close();
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }

  // Setup Zookeeper
  zkServer = new EmbeddedZookeeper();
  String zkConnect = BROKERHOST + ":" + zkServer.port();
  zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
  zkUtils = ZkUtils.apply(zkClient, false);

  // Setup brokers
  cleanupDir();
  Properties props = new Properties();
  props.setProperty("zookeeper.connect", zkConnect);
  props.setProperty("broker.id", "0");
  props.setProperty("log.dirs", KAFKA_PATH);
  props.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);
}
 
Example #30
Source File: TestStreamProcessor.java    From samza with Apache License 2.0
private void initConsumer(String bootstrapServer) {
  consumer = TestUtils.createConsumer(
      bootstrapServer,
      "group",
      "earliest",
      4096L,
      "org.apache.kafka.clients.consumer.RangeAssignor",
      30000,
      SecurityProtocol.PLAINTEXT,
      Option$.MODULE$.<File>empty(),
      Option$.MODULE$.<Properties>empty(),
      new StringDeserializer(),
      new ByteArrayDeserializer(),
      Option$.MODULE$.<Properties>empty());
}