Java Code Examples for kafka.utils.TestUtils#createServer()

The following examples show how to use kafka.utils.TestUtils#createServer(). They are drawn from a range of open source projects; the source file and project license are noted above each example. Most of them follow the same basic recipe, sketched immediately below: start an embedded ZooKeeper, build a minimal single-broker configuration, and hand it to TestUtils.createServer().
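The sketch assumes the 0.9/0.10-era kafka.zk.EmbeddedZookeeper and kafka.utils.MockTime test classes that several of the examples use; class locations and method signatures differ between Kafka versions, so treat it as illustrative rather than a drop-in snippet.

import java.nio.file.Files;
import java.util.Properties;

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.TestUtils;
import kafka.zk.EmbeddedZookeeper;

public class EmbeddedKafkaSketch {

    public static void main(String[] args) throws Exception {
        // In-process ZooKeeper for the broker to register with.
        EmbeddedZookeeper zkServer = new EmbeddedZookeeper();
        String zkConnect = "127.0.0.1:" + zkServer.port();

        // Minimal single-broker configuration backed by a temporary log directory.
        Properties brokerProps = new Properties();
        brokerProps.setProperty("zookeeper.connect", zkConnect);
        brokerProps.setProperty("broker.id", "0");
        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
        brokerProps.setProperty("listeners", "PLAINTEXT://127.0.0.1:9092");
        brokerProps.setProperty("offsets.topic.replication.factor", "1");

        // createServer builds a KafkaServer from the config and the supplied clock, starts it, and returns it.
        KafkaServer kafkaServer = TestUtils.createServer(new KafkaConfig(brokerProps), new MockTime());

        // ... exercise the broker from the test ...

        // Tear down in reverse order so the ports are released.
        kafkaServer.shutdown();
        zkServer.shutdown();
    }
}
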
Example 1
Source File: KafkaEmbedded.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Creates and starts an embedded Kafka broker.
 *
 * @param config Broker configuration settings.  Used to modify, for example, the listeners
 *               the broker should use.  Note that you cannot change some settings such as
 *               `log.dirs`.
 */
public KafkaEmbedded(final Properties config) throws IOException {
  this.tmpFolder = new TemporaryFolder();
  this.tmpFolder.create();
  this.logDir = tmpFolder.newFolder();
  this.effectiveConfig = effectiveConfigFrom(config, logDir);

  final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, true);
  log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
      logDir, zookeeperConnect());

  kafka = TestUtils.createServer(kafkaConfig, new SystemTime());
  log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
      brokerList(), zookeeperConnect());
}
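For context, here is a hedged sketch of how a test might wire a producer against this wrapper. It relies only on the constructor and the brokerList() accessor referenced in the logging above; the fixture class name and the serializer choices are illustrative assumptions, not part of the original project.

import java.io.IOException;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;

public class KafkaEmbeddedUsageSketch {

    // Starts an embedded broker with the given overrides and returns a producer pointed at it.
    public static KafkaProducer<String, String> producerFor(Properties brokerOverrides) throws IOException {
        KafkaEmbedded broker = new KafkaEmbedded(brokerOverrides);

        Properties producerProps = new Properties();
        producerProps.setProperty("bootstrap.servers", broker.brokerList());
        producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(producerProps);
    }
}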
 
Example 2
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
public static void startServer() throws RuntimeException {
  if (serverStarted && serverClosed) {
    throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
  }

  if (!serverStarted) {
    serverStarted = true;
    zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);

    kafkaPort = TestUtils.choosePort();
    Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);

    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
  }
}
 
Example 3
Source File: KafkaEntranceProcessorTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_OOS, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 4
Source File: KafkaDestinationProcessorTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 5
Source File: KafkaUtilsTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
 
Example 6
Source File: EmbeddedKafkaCluster.java    From common-docker with Apache License 2.0
private void startBroker(int brokerId, String zkConnectString) throws IOException {
  if (brokerId < 0) {
    throw new IllegalArgumentException("broker id must not be negative");
  }

  Properties props = TestUtils
      .createBrokerConfig(
          brokerId,
          zkConnectString,
          ENABLE_CONTROLLED_SHUTDOWN,
          ENABLE_DELETE_TOPIC,
          0,
          INTER_BROKER_SECURITY_PROTOCOL,
          this.brokerTrustStoreFile,
          this.brokerSaslProperties,
          ENABLE_PLAINTEXT,
          ENABLE_SASL_PLAINTEXT,
          SASL_PLAINTEXT_PORT,
          ENABLE_SSL,
          SSL_PORT,
          this.enableSASLSSL,
          0,
          Option.<String>empty(),
          1,
          false,
          NUM_PARTITIONS,
          DEFAULT_REPLICATION_FACTOR
      );

  KafkaServer broker = TestUtils.createServer(KafkaConfig.fromProps(props), new MockTime());
  brokersById.put(brokerId, broker);
}
 
Example 7
Source File: KafkaComponent.java    From metron with Apache License 2.0
@Override
public void start() {
  // setup Zookeeper
  zookeeperConnectString = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);

  zkClient = new ZkClient(zookeeperConnectString, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, ZKStringSerializer$.MODULE$);

  // setup Broker
  Properties props = TestUtilsWrapper.createBrokerConfig(0, zookeeperConnectString, brokerPort);
  props.setProperty("zookeeper.connection.timeout.ms", Integer.toString(KAFKA_ZOOKEEPER_TIMEOUT_MS));
  KafkaConfig config = new KafkaConfig(props);
  Time mock = new MockTime();
  kafkaServer = TestUtils.createServer(config, mock);

  org.apache.log4j.Level oldLevel = UnitTestHelper.getLog4jLevel(KafkaServer.class);
  UnitTestHelper.setLog4jLevel(KafkaServer.class, org.apache.log4j.Level.OFF);
  // do not proceed until the broker is up
  TestUtilsWrapper.waitUntilBrokerIsRunning(kafkaServer,"Timed out waiting for RunningAsBroker State",100000);

  for(Topic topic : getTopics()) {
    try {
      createTopic(topic.name, topic.numPartitions, KAFKA_PROPAGATE_TIMEOUT_MS);
    } catch (InterruptedException e) {
      throw new RuntimeException("Unable to create topic", e);
    }
  }
  UnitTestHelper.setLog4jLevel(KafkaServer.class, oldLevel);
  if(postStartCallback != null) {
    postStartCallback.apply(this);
  }
}
 
Example 8
Source File: KafkaProducerApp.java    From bpmn.ai with BSD 3-Clause "New" or "Revised" License
public static void main(String[] args) throws IOException {

        // setup Zookeeper
        zkServer = new EmbeddedZookeeper();
        String zkConnect = ZOOKEEPER_HOST + ":" + zkServer.port();
        zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
        ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

        // setup Kafka
        Properties brokerProps = new Properties();
        brokerProps.setProperty("zookeeper.connect", zkConnect);
        brokerProps.setProperty("broker.id", "0");
        brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
        brokerProps.setProperty("listeners", "PLAINTEXT://" + KAFKA_HOST + ":" + KAFKA_PORT);
        brokerProps.setProperty("offsets.topic.replication.factor", "1");
        KafkaConfig config = new KafkaConfig(brokerProps);
        Time mock = new MockTime();
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        AdminUtils.createTopic(zkUtils, TOPIC_PROCESS_INSTANCE, 1, 1, new Properties(),
                RackAwareMode.Disabled$.MODULE$);
        AdminUtils.createTopic(zkUtils, TOPIC_VARIABLE_UPDATE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

        // setup producer
        Properties producerProps = new Properties();
        producerProps.setProperty("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
        producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
        producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);

        // fill in test data
        try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_PROCESS_INSTANCE))) {
            stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_PROCESS_INSTANCE, 0, 0, l)));
        }

        try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_VARIABLE_UPDATE))) {
            stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_VARIABLE_UPDATE, 0, 0, l)));
        }

        System.out.println("Kafka Server is running and listening on " + KAFKA_HOST + ":" + KAFKA_PORT + " ...");
    }
 
Example 9
Source File: KafkaImportApplicationIntegrationTest.java    From bpmn.ai with BSD 3-Clause "New" or "Revised" License
@BeforeClass
public static void setupBeforeClass() throws Exception {
    //System.setProperty("hadoop.home.dir", "C:\\Users\\b60\\Desktop\\hadoop-2.6.0\\hadoop-2.6.0");

    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZOOKEEPER_HOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Kafka
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + KAFKA_HOST + ":" + KAFKA_PORT);
    brokerProps.setProperty("offsets.topic.replication.factor" , "1");
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topic
    AdminUtils.createTopic(zkUtils, TOPIC_PROCESS_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_ACTIVITY_INSTANCE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_VARIABLE_UPDATE, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

    // setup producer
    Properties producerProps = new Properties();
    producerProps.setProperty("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
    producerProps.setProperty("key.serializer","org.apache.kafka.common.serialization.IntegerSerializer");
    producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<Integer, String> producer = new KafkaProducer<>(producerProps);

    // fill in test data
    try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_PROCESS_INSTANCE))) {
        stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_PROCESS_INSTANCE, 0, 0, l)));
    }

    try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_ACTIVITY_INSTANCE))) {
        stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_ACTIVITY_INSTANCE, 0, 0, l)));
    }

    try (Stream<String> stream = Files.lines(Paths.get(FILE_STREAM_INPUT_VARIABLE_UPDATE))) {
        stream.forEach(l -> producer.send(new ProducerRecord<>(TOPIC_VARIABLE_UPDATE, 0, 0, l)));
    }
}
 
Example 10
Source File: SecureKafkaBase.java    From datacollector with Apache License 2.0
@Test
public void testSecureKafkaReadWrite() throws IOException, StageException, URISyntaxException, GeneralSecurityException {
  final String message = "Hello StreamSets";

  // create broker config with auto create topic option enabled
  Properties props = TestUtil09.createKafkaConfig(getPlainTextPort(), zkConnect, true, 1);
  // add SSL configuration properties to broker
  addBrokerSecurityConfig(props);
  // create server
  KafkaConfig kafkaConfig = new KafkaConfig(props);
  server = TestUtils.createServer(kafkaConfig, SystemTime$.MODULE$);

  // create and init consumer
  Source.Context sourceContext = ContextInfoCreator.createSourceContext(
    "s",
    false,
    OnRecordError.TO_ERROR,
    ImmutableList.of("a")
  );
  Map<String, Object> consumerConfig = new HashMap<>();
  consumerConfig.put("auto.commit.interval.ms", "1000");
  consumerConfig.put("auto.offset.reset", "earliest");
  consumerConfig.put("session.timeout.ms", "30000");
  consumerConfig.put(KafkaConstants.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumerConfig.put(KafkaConstants.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
  addClientSecurityConfig(consumerConfig);
  ConsumerFactorySettings consumerFactorySettings = new ConsumerFactorySettings(
    zkConnect,
    String.format("localhost:%d", getSecurePort()),
    getTopic(),
    1000,
    sourceContext,
    consumerConfig,
    "test",
    100,
    false,
    KafkaAutoOffsetReset.EARLIEST.name().toLowerCase(),
    0
  );
  SdcKafkaConsumerFactory sdcKafkaConsumerFactory = SdcKafkaConsumerFactory.create(consumerFactorySettings);
  SdcKafkaConsumer sdcKafkaConsumer = sdcKafkaConsumerFactory.create();
  sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
  sdcKafkaConsumer.init();

  // create and init producer
  HashMap<String, Object> kafkaProducerConfigs = new HashMap<>();
  kafkaProducerConfigs.put("retries", 0);
  kafkaProducerConfigs.put("batch.size", 10);
  kafkaProducerConfigs.put("linger.ms", 1000);
  addClientSecurityConfig(kafkaProducerConfigs);
  ProducerFactorySettings settings = new ProducerFactorySettings(
    kafkaProducerConfigs,
    PartitionStrategy.DEFAULT,
    "localhost:" + getSecurePort(),
    DataFormat.JSON,
    false
  );
  SdcKafkaProducerFactory sdcKafkaProducerFactory = SdcKafkaProducerFactory.create(settings);
  SdcKafkaProducer sdcKafkaProducer = sdcKafkaProducerFactory.create();
  sdcKafkaProducer.init();

  // write 10 messages
  for(int i = 0; i < 10; i++) {
    sdcKafkaProducer.enqueueMessage(getTopic(), message.getBytes(), "0");
  }
  sdcKafkaProducer.write(null);

  // read 10 messages
  List<MessageAndOffset> read = new ArrayList<>();
  while(read.size() < 10) {
    MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
    if(messageAndOffset != null) {
      read.add(messageAndOffset);
    }
  }

  // verify
  Assert.assertNotNull(read);
  Assert.assertEquals(10, read.size());
}
 
Example 11
Source File: ITZipkinReceiver.java    From incubator-retired-htrace with Apache License 2.0
@Test
public void testKafkaTransport() throws Exception {

  String topic = "zipkin";
  // Kafka setup
  EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
  ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
  Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
  KafkaConfig config = new KafkaConfig(props);
  KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());

  Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
  TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
  zkClient.close();
  TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

  // HTrace
  HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
      "sampler.classes", "AlwaysSampler",
      "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
      "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
      "zipkin.kafka.topic", topic,
      ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
  );

  final Tracer tracer = new Tracer.Builder("test-tracer")
      .tracerPool(new TracerPool("test-tracer-pool"))
      .conf(hTraceConfiguration)
      .build();

  String scopeName = "test-kafka-transport-scope";
  TraceScope traceScope = tracer.newScope(scopeName);
  traceScope.close();
  tracer.close();

  // Kafka consumer
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
  ConsumerConnector connector =
      kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
  ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

  // Test
  Assert.assertTrue("We should have one message in Kafka", it.hasNext());
  Span span = new Span();
  new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
  Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

  kafkaServer.shutdown();

}
 
Example 12
Source File: ZookeeperBrokersTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "4"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example 13
Source File: KafkaMessageNewReceiverTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {

    }
}
 
Example 14
Source File: KafkaMessageNewReceiverPoolTest_3.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "2"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example 15
Source File: KafkaCommandTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        Time mock = new SystemTime();
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {

    }
}
 
Example 16
Source File: KafkaMessageSenderPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example 17
Source File: KafkaMessageNewSenderPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example 18
Source File: NewSenderWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {


        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);

    } catch (Exception e) {
    }
}
 
Example 19
Source File: ReceiverTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {


        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}
 
Example 20
Source File: ZookeeperHostsTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port
        // that it always uses for ZK
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
    }
}