kafka.admin.TopicCommand Java Examples

The following examples show how to use kafka.admin.TopicCommand. They are drawn from open-source projects; the originating source file, project, and license are noted above each example.
Example #1
Source File: KafkaTool.java    From Scribengin with GNU Affero General Public License v3.0
/**
 * Create a topic. This method will not create a topic that is currently scheduled for deletion.
 * For the replication tools, see https://cwiki.apache.org/confluence/display/KAFKA/Replication+tools#Replicationtools-Howtousethetool
 *
 * @see https://kafka.apache.org/documentation.html#topicconfigs for valid topic configs
 */
public void createTopic(String[] args) throws Exception {
  int sessionTimeoutMs = 10000;
  int connectionTimeoutMs = 10000;

  TopicCommandOptions options = new TopicCommandOptions(args);
  ZkClient client = new ZkClient(zkConnects, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
  if (topicExits(name)) {
    TopicCommand.deleteTopic(client, options);
  }

  TopicCommand.createTopic(client, options);
  try {
    // Give the broker a moment to pick up the newly created topic.
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  client.close();
}
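A minimal call sketch (hedged: the constructor shown here is hypothetical and only stands in for however the tool is wired with its topic name and ZooKeeper connect string; the argument array follows the TopicCommandOptions format used above):

KafkaTool tool = new KafkaTool("test.topic", "localhost:2181"); // hypothetical constructor
tool.createTopic(new String[]{
    "--create", "--topic", "test.topic",
    "--replication-factor", "1", "--partitions", "1"});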
 
Example #2
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testMultithread() throws IOException {
    // "--zookeeper dummy" merely satisfies the option parser; the actual
    // connection is the ZkClient passed as the first argument.
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1\n" +
            "}";

    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>() {
    });
    sink.open();
    int msgCount = 10000;
    sendMessages(TOPIC_NAME_MULTITHREAD, sink, msgCount);
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    checkConsumer(TOPIC_NAME_MULTITHREAD, msgCount - (int) sink.droppedRecords.get());
}
 
Example #3
Source File: KafkaOperatorTestBase.java    From attic-apex-malhar with Apache License 2.0
public void createTopic(int clusterid, String topicName)
{
  String[] args = new String[9];
  args[0] = "--zookeeper";
  args[1] = "localhost:" + TEST_ZOOKEEPER_PORT[clusterid];
  args[2] = "--replication-factor";
  args[3] = "1";
  args[4] = "--partitions";
  if (hasMultiPartition) {
    args[5] = "2";
  } else {
    args[5] = "1";
  }
  args[6] = "--topic";
  args[7] = topicName;
  args[8] = "--create";

  ZkUtils zu = ZkUtils.apply("localhost:" + TEST_ZOOKEEPER_PORT[clusterid], 30000, 30000, false);
  TopicCommand.createTopic(zu, new TopicCommand.TopicCommandOptions(args));
}
 
Example #4
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic's configuration</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param config       config entries; "key=value" sets a config, a bare key deletes it
 */
public static void alterTopic(String zookeeperStr, String topic,
                              String... config) {

    StringBuffer updateOptions = new StringBuffer();

    updateOptions.append("--alter").append(space)
            .append("--topic").append(space).append(topic);

    for (int i = 0; i < config.length; i++) {
        if (config[i].indexOf("=") > 0) {
            updateOptions.append(space).append("--config").append(space)
                    .append(config[i]);
        } else {
            updateOptions.append(space).append("--delete-config")
                    .append(space).append(config[i]);
        }
    }

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            updateOptions.toString().split(space)));
}
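For instance (a hedged usage sketch; host, topic, and config values are illustrative), an entry containing '=' is passed through as --config while a bare key becomes --delete-config:

KafkaCommand.alterTopic("localhost:2181", "my-topic",
        "retention.ms=86400000",   // contains '=', appended as --config
        "cleanup.policy");         // no '=', appended as --delete-config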
 
Example #5
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic's partition count and configuration</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param partitions   number of partitions
 * @param config       config entries; "key=value" sets a config, a bare key deletes it
 */
public static void alterTopic(String zookeeperStr, String topic,
                              int partitions, String... config) {

    StringBuffer updateOptions = new StringBuffer();

    updateOptions.append("--alter").append(space)
            .append("--topic").append(space).append(topic).append(space)
            .append("--partitions").append(space).append(partitions);

    for (int i = 0; i < config.length; i++) {
        if (config[i].indexOf("=") > 0) {
            updateOptions.append(space).append("--config").append(space)
                    .append(config[i]);
        } else {
            updateOptions.append(space).append("--delete-config")
                    .append(space).append(config[i]);
        }
    }

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            updateOptions.toString().split(space)));
}
 
Example #6
Source File: KafkaStarterUtils.java    From uReplicator with Apache License 2.0
public static void createTopic(String kafkaTopic, int numOfPartitions, String zkStr, String replicationFactor) {
  // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
  try {
    String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", replicationFactor,
        "--partitions", String.valueOf(numOfPartitions), "--topic", kafkaTopic};
    KafkaZkClient zkClient = KafkaZkClient
        .apply(zkStr, false, 3000, 3000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server",
            "SessionExpireListener");
    TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
    TopicCommand.createTopic(zkClient, opts);
  } catch (TopicExistsException e) {
    // Catch TopicExistsException, otherwise it would break maven-surefire-plugin
    System.out.println("Topic already exists");
  }
}
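A call sketch (all values illustrative), creating test-topic with 4 partitions and replication factor 1:

KafkaStarterUtils.createTopic("test-topic", 4, "localhost:2181", "1");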
 
Example #7
Source File: KafkaOperatorTestBase.java    From attic-apex-malhar with Apache License 2.0
public void createTopic(int clusterid, String topicName)
{
  String[] args = new String[9];
  args[0] = "--zookeeper";
  args[1] = "localhost:" + TEST_ZOOKEEPER_PORT[clusterid];
  args[2] = "--replication-factor";
  args[3] = "1";
  args[4] = "--partitions";
  if (hasMultiPartition) {
    args[5] = "2";
  } else {
    args[5] = "1";
  }
  args[6] = "--topic";
  args[7] = topicName;
  args[8] = "--create";

  TopicCommand.main(args);
  // There is no programmatic, synchronous way to create the topic, so wait a few
  // seconds to make sure the topic exists before the tests use it and avoid
  // spurious failures.
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
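The fixed sleep can be replaced by polling until the topic is registered, for example (a sketch against the same pre-1.0 Scala API; the ZkUtils handle is an assumption here, since the method above only calls TopicCommand.main):

ZkUtils zkUtils = ZkUtils.apply("localhost:" + TEST_ZOOKEEPER_PORT[clusterid], 30000, 30000, false);
long deadline = System.currentTimeMillis() + 10000;
while (!AdminUtils.topicExists(zkUtils, topicName) && System.currentTimeMillis() < deadline) {
  Thread.sleep(100); // poll until the topic shows up in ZooKeeper, or give up after 10s
}
zkUtils.close();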
 
Example #8
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: listTopics</p>
 * <p>Description: List all topics</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 */
public static void listTopics(String zookeeperStr) {

    TopicCommand.listTopics(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--list"}));
}
 
Example #9
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: deleteTopic</p>
 * <p>Description: Delete a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 */
public static void deleteTopic(String zookeeperStr, String topic) {

    TopicCommand.deleteTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--delete", "--topic", topic}));
}
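Usage sketch (host and topic are illustrative). Note that on the older broker versions these examples target, the delete only takes effect when brokers run with delete.topic.enable=true; otherwise the topic is merely marked for deletion:

KafkaCommand.deleteTopic("localhost:2181", "my-topic");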
 
Example #10
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic's partition count</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param partitions   number of partitions
 */
public static void alterTopic(String zookeeperStr, String topic,
                              int partitions) {

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--alter", "--topic", topic,
                    "--partitions", String.valueOf(partitions)}));
}
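For example (illustrative values; Kafka only lets the partition count grow, so the new value must be larger than the current one):

KafkaCommand.alterTopic("localhost:2181", "my-topic", 8);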
 
Example #11
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: describeTopic</p>
 * <p>Description: Describe a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 */
public static void describeTopic(String zookeeperStr, String topic) {

    TopicCommand.describeTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--describe", "--topic", topic}));
}
 
Example #12
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: createTopic</p>
 * <p>Description: Create a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param replications replication factor
 * @param partitions   number of partitions
 */
public static void createTopic(String zookeeperStr, String topic,
                               int replications, int partitions) {

    TopicCommand.createTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--create", "--topic", topic,
                    "--replication-factor", String.valueOf(replications),
                    "--partitions", String.valueOf(partitions)}));
}
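Usage sketch (illustrative values; creates the topic with replication factor 1 and 4 partitions):

KafkaCommand.createTopic("localhost:2181", "my-topic", 1, 4);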
 
Example #13
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testStartWithKafkaOutage() throws Throwable {
    String topicName = TOPIC_NAME + "kafkaoutage";

    TopicCommand.createTopic(zk.getZkClient(),
        new TopicCommand.TopicCommandOptions(new String[]{
            "--zookeeper", "dummy", "--create", "--topic", topicName,
            "--replication-factor", "2", "--partitions", "1"}));

    String[] brokerList = kafkaServer.getBrokerListStr().split(",");
    int port1 = Integer.parseInt(brokerList[0].split(":")[1]);
    int port2 = Integer.parseInt(brokerList[1].split(":")[1]);

    String description = "{\n" +
        "    \"type\": \"kafka\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"acks\": 1\n" +
        "}";

    kafkaServer.shutdown();


    final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();

    final int msgCount = 10000;
    final CountDownLatch latch = new CountDownLatch(1);
    sink.setRecordCounterListener(new Action3<Long, Long, Long>() {

        @Override
        public void call(Long queued, Long sent, Long dropped) {
            if (sent == msgCount - sink.droppedRecords.get()) {
                latch.countDown();
            }
        }
    });

    sendMessages(topicName, sink, msgCount);

    kafkaServer.startServer(port1, port2); // bring the brokers back up
    assertTrue(latch.await(10, TimeUnit.SECONDS));

    sendMessages(topicName, sink, msgCount);
    sink.close();

    checkConsumer(topicName, 2 * msgCount - (int) sink.droppedRecords.get());
}
 
Example #14
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"ack\": 1,\n" +
            "     \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "     \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using both the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example #15
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testBlockingOnBufferFull() throws Throwable {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "buffer_full",
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1,\n" +
            "    \"block.on.buffer.full\": true,\n" +
            "    \"buffer.memory\": 1000,\n" +
            "    \"batch.size\": 1000\n" +
            "}";


    final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch shutdownLatch = new CountDownLatch(1);
    new Thread(new Runnable() {

        @Override
        public void run() {
            for (int i = 0; i < 100; ++i) {
                try {
                    sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "buffer_full", getBigData()), jsonMapper));
                } catch (Exception e) {
                    fail("exception thrown: " + e.toString());
                }
                if (i == 50) {
                    try {
                        kafkaServer.shutdown(); // simulate Kafka latency / an outage
                    } finally {
                        shutdownLatch.countDown();
                    }
                }
            }
            latch.countDown();
        }
    }).start();
    latch.await(3, TimeUnit.SECONDS);
    assertEquals(latch.getCount(), 1); // blocked

    // Make sure the kafka server is restarted only if shutdown is successful.
    shutdownLatch.await();
    kafkaServer.before();
}
 
Example #16
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testCheckPause() throws IOException, InterruptedException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "check_pause",
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1,\n" +
            "    \"buffer.memory\": 1000,\n" +
            "    \"batch.size\": 1000\n" +
            "}";


    final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();

    final AtomicBoolean exceptionCaught = new AtomicBoolean(false);
    final AtomicBoolean checkPaused = new AtomicBoolean(false);
    final AtomicBoolean pending = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);

    sink.setRecordCounterListener(new Action3<Long, Long, Long>() {

        @Override
        public void call(Long queued, Long sent, Long dropped) {
            if (dropped > 0) {
                exceptionCaught.set(true);
                if (sink.checkPause() > 0) {
                    checkPaused.set(true);
                }
                if (sink.getNumOfPendingMessages() > 0) {
                    pending.set(true);
                }
                latch.countDown();
            }
        }
    });
    for (int i = 0; i < 100; ++i) {
        sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "check_pause", getBigData()), jsonMapper));
    }
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    assertTrue(exceptionCaught.get());
    assertTrue(checkPaused.get());
    assertTrue(pending.get());
}
 
Example #17
Source File: ZookeeperBrokersTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "4"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #18
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1\n" +
            "}";


    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    while (msgIterator.hasNext()) {
        sink.writeTo(new StringMessage(msgIterator.next()));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
    assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
 
Example #19
Source File: LocalKafkaServer.java    From Krackle with Apache License 2.0
public void createTopic(String topic) {
    TopicCommandOptions createOpts = new TopicCommandOptions(new String[]{
            "--create", "--zookeeper", "localhost:21818",
            "--replication-factor", "1", "--partitions", "1",
            "--topic", topic});
    TopicCommand.createTopic(zkUtils, createOpts);
}
 
Example #20
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    // create and send test messages to Kafka
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
    while (msgIterator.hasNext()) {
        StringMessage next = new StringMessage(msgIterator.next());
        sink.writeTo(next); // send
        sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
    }
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    // get data back from Kafka
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    for (int i = 0; i < messageSet.size(); i++) {
        // ensure that each received message was one that was sent
        String receivedPayload = new String(extractMessage(messageSet, i));
        System.out.println("Got message: " + receivedPayload);
        assertTrue(sentPayloads.remove(receivedPayload)); // plain assert() would be skipped without -ea
    }
    assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
 
Example #21
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "    \"batchSize\": 10,\n" +
            "    \"jobQueueSize\": 3\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example #22
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatibility() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
        "    \"type\": \"kafkaV1\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"ack\": 1,\n" +
        keyTopicMap + "\n" +
        "}";
    String description2 = "{\n" +
        "    \"type\": \"kafkaV2\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"request.required.acks\": 1,\n" +
        keyTopicMap + "\n" +
        "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
        createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = 
                                            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using both the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example #23
Source File: NewReceiverWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #24
Source File: KafkaMessageReceiverImplTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        Properties kafkaProps2 = TestUtils.createBrokerConfig(brokerId + 1,
                zkConnect, false, false, (port - 1),
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps2.setProperty("auto.create.topics.enable", "true");
        kafkaProps2.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps2.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps2.setProperty("host.name", "localhost");
        kafkaProps2.setProperty("port", (port - 1) + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        KafkaConfig config2 = new KafkaConfig(kafkaProps2);

        Time mock = new SystemTime();
        Time mock2 = new SystemTime();

        kafkaServer = TestUtils.createServer(config, mock);
        KafkaServer kafkaServer2 = TestUtils.createServer(config2, mock2);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "2", "--partitions", "2"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        servers.add(kafkaServer2);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #25
Source File: TestKafkaProvider.java    From kafka-eagle with Apache License 2.0
public static void main(String[] args) {
	String[] options = new String[] { "--alter", "--zookeeper", "slave01:2181", "--partitions", "6", "--topic", "KE_TTT_1200" };
	TopicCommand.main(options);
}
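Because TopicCommand.main() ends by calling System.exit() (as noted in Example #6 above), embedding it in a long-running process is awkward; the same alter can be issued through the options API used elsewhere on this page (a sketch; the timeout values are illustrative):

ZkUtils zkUtils = ZkUtils.apply("slave01:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
TopicCommand.alterTopic(zkUtils, new TopicCommand.TopicCommandOptions(
        new String[]{"--alter", "--topic", "KE_TTT_1200", "--partitions", "6"}));
zkUtils.close();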
 
Example #26
Source File: NewSenderWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {


        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);

    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #27
Source File: SenderTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #28
Source File: KafkaMessageReceiverPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {
        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}
 
Example #29
Source File: KafkaMessageNewReceiverPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void setUp() throws Exception {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated (Kafka currently
        // allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);

        KafkaConfig config = new KafkaConfig(kafkaProps);
        Time mock = new SystemTime();
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "2"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
        e.printStackTrace(); // surface broker/topic setup failures instead of swallowing them
    }
}