org.apache.kafka.clients.consumer.OffsetResetStrategy Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.OffsetResetStrategy. Each example is taken verbatim from an open-source project; the source file, project, and license are noted above each snippet.
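Before the project examples, here is a minimal self-contained sketch of the two ways the enum typically appears on this page: seeding a MockConsumer for tests, and deriving the auto.offset.reset configuration value. It is not taken from any of the projects below, and the class name is illustrative.

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

public class OffsetResetStrategyDemo {

  public static void main(String[] args) {
    // A MockConsumer must be told how to reset when it has no committed offset;
    // most tests on this page pick EARLIEST so every queued record is returned.
    MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

    // The lower-cased enum name is the legal value of the auto.offset.reset
    // property ("earliest", "latest" or "none").
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
        OffsetResetStrategy.LATEST.name().toLowerCase());

    System.out.println(props.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); // prints "latest"
    mock.close();
  }
}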
Example #1
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testUnboundedSourceWithExplicitPartitions() {
  int numElements = 1000;

  List<String> topics = ImmutableList.of("test");

  KafkaIO.Read<byte[], Long> reader =
      KafkaIO.<byte[], Long>read()
          .withBootstrapServers("none")
          .withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
          .withKeyDeserializer(ByteArrayDeserializer.class)
          .withValueDeserializer(LongDeserializer.class)
          .withMaxNumRecords(numElements / 10);

  PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());

  // assert that every element is a multiple of 5.
  PAssert.that(input).satisfies(new AssertMultipleOf(5));

  PAssert.thatSingleton(input.apply(Count.globally())).isEqualTo(numElements / 10L);

  p.run();
}
 
Example #2
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testPollTimeout(TestContext ctx) throws Exception {
  Async async = ctx.async();
  String topicName = "testPollTimeout";
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0);
  KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config);

  int pollingTimeout = 1500;
  // Set the polling timeout to 1500 ms (default is 1000)
  consumerWithCustomTimeout.pollTimeout(Duration.ofMillis(pollingTimeout));
  // Subscribe to the empty topic (we want the poll() call to time out!)
  consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> {
    consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once
    long beforeSeek = System.currentTimeMillis();
    consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> {
      long durationWShortTimeout = System.currentTimeMillis() - beforeSeek;
      ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout");
      consumerWithCustomTimeout.close();
      async.countDown();
    });
  });
}
 
Example #3
Source File: ProcessingConfigTest.java    From common-kafka with Apache License 2.0
@Test
public void constructor_defaults() throws IOException {
    assertTrue(config.getCommitInitialOffset());
    assertThat(config.getCommitSizeThreshold(), is(Long.parseLong(ProcessingConfig.COMMIT_SIZE_THRESHOLD_DEFAULT)));
    assertThat(config.getCommitTimeThreshold(), is(Long.parseLong(ProcessingConfig.COMMIT_TIME_THRESHOLD_DEFAULT)));
    assertThat(config.getFailPauseTime(), is(Long.parseLong(ProcessingConfig.FAIL_PAUSE_TIME_DEFAULT)));
    assertThat(config.getFailSampleSize(), is(Integer.parseInt(ProcessingConfig.FAIL_SAMPLE_SIZE_DEFAULT)));
    assertThat(config.getFailThreshold(), is(Double.parseDouble(ProcessingConfig.FAIL_THRESHOLD_DEFAULT)));
    assertThat(config.getOffsetResetStrategy(), is(OffsetResetStrategy.EARLIEST));
    assertThat(config.getMaxPollInterval(), is(300000L));

    // config properties should at least contain everything from properties
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        assertThat(config.getProperties().get(entry.getKey()), is(entry.getValue()));
    }

    assertThat(config.getProperties().getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG), is(Boolean.FALSE.toString()));
}
 
Example #4
Source File: ProcessingPartitionTest.java    From common-kafka with Apache License 2.0
@Before
public void before() {
    properties = new Properties();
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, OffsetResetStrategy.EARLIEST.toString().toLowerCase());

    config = new ProcessingConfig(properties);
    topicPartition = new TopicPartition("topic", 1);

    when(consumer.committed(topicPartition)).thenReturn(new OffsetAndMetadata(0L));

    partition = new MockProcessingPartition<>(topicPartition, config, consumer);

    logAppender = new TestLogAppender();
    RootLogger.getRootLogger().addAppender(logAppender);
}
 
Example #5
Source File: ProcessingPartition.java    From common-kafka with Apache License 2.0
/**
 * Returns the offset that would be used for the partition based on the consumer's configured offset reset strategy
 *
 * @return the offset that would be used for the partition based on the consumer's configured offset reset strategy
 */
protected long getResetOffset() {
    OffsetResetStrategy strategy = config.getOffsetResetStrategy();

    if (strategy == OffsetResetStrategy.EARLIEST) {
        LOGGER.debug("Looking up offset for partition [{}] using earliest reset", topicPartition);
        return getEarliestOffset();
    }
    else if (strategy == OffsetResetStrategy.LATEST) {
        LOGGER.debug("Looking up offset for partition [{}] using latest reset", topicPartition);
        return getLatestOffset();
    }
    else {
        throw new IllegalStateException("Unable to reset partition to previously committed offset as there is no"
                + " offset for partition [" + topicPartition + "] and offset reset strategy [" + strategy + "] is unknown");
    }
}
 
Example #6
Source File: AdminClientTest.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testListConsumerGroups(TestContext ctx) {

  KafkaAdminClient adminClient = KafkaAdminClient.create(this.vertx, config);

  Async async = ctx.async();

  kafkaCluster.useTo().consume("groupId", "clientId", OffsetResetStrategy.EARLIEST,
    new StringDeserializer(), new StringDeserializer(), () -> true, null, null,
    Collections.singleton("first-topic"), c -> { });

  // timer needed because the Kafka cluster takes time to start the consumer
  vertx.setTimer(1000, t -> {

    adminClient.listConsumerGroups(ctx.asyncAssertSuccess(groups -> {

      ctx.assertTrue(groups.size() > 0);
      ctx.assertTrue(groups.stream().map(ConsumerGroupListing::getGroupId).anyMatch(g -> g.equals("groupId")));
      adminClient.close();
      async.complete();
    }));

  });
}
 
Example #7
Source File: SourcePartitionValidatorTest.java    From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void shouldApplyTopicRenameWhenCheckingHealth() {
  MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  List<PartitionInfo> partitionInfoList =
      Arrays.asList(
          new PartitionInfo("replace1", 0, null, null, null),
          new PartitionInfo("replace1", 1, null, null, null));
  consumer.updatePartitions("replace1", partitionInfoList);

  SourcePartitionValidator sourcePartitionHealthChecker =
      new SourcePartitionValidator(
          consumer,
          SourcePartitionValidator.MatchingStrategy.TOPIC,
          t -> t.equals("topic1") ? "replace1" : t);
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 0)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 2)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic2", 0)), is(false));
}
 
Example #8
Source File: ConsumerMockTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testConsume(TestContext ctx) throws Exception {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, "abc", "def"));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
 
Example #9
Source File: ConsumerMockTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testConsumeWithHeader(TestContext ctx) {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    Header[] headers = record.headers().toArray();
    ctx.assertEquals(1, headers.length);
    Header header = headers[0];
    ctx.assertEquals("header_key", header.key());
    ctx.assertEquals("header_value", new String(header.value()));
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0L, 0, 0, "abc", "def",
        new RecordHeaders(Collections.singletonList(new RecordHeader("header_key", "header_value".getBytes())))));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
 
Example #10
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testBatchHandler(TestContext ctx) throws Exception {
  String topicName = "testBatchHandler";
  String consumerId = topicName;
  Async batch1 = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 500;
  kafkaCluster.useTo().produceStrings(numMessages, batch1::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch1.awaitSuccess(10000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);
  Async batchHandler = ctx.async();
  consumer.batchHandler(records -> {
    ctx.assertEquals(numMessages, records.count());
    batchHandler.complete();
  });
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {});
  consumer.subscribe(Collections.singleton(topicName));
}
 
Example #11
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
  KafkaIO.Read<byte[], byte[]> read =
      KafkaIO.readBytes()
          .withBootstrapServers("myServer1:9092,myServer2:9092")
          .withTopicPartitions(
              ImmutableList.of(new TopicPartition("test", 5), new TopicPartition("test", 6)))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  Lists.newArrayList("test"),
                  10,
                  10,
                  OffsetResetStrategy.EARLIEST)); // 10 partitions

  DisplayData displayData = DisplayData.from(read);

  assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
  assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
  assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
  assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
  assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
 
Example #12
Source File: MockKafkaClientFactory.java    From kafka-pubsub-emulator with Apache License 2.0
@Override
public Consumer<String, ByteBuffer> createConsumer(String subscription) {
  MockConsumer<String, ByteBuffer> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
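  // Remember every consumer handed out for this subscription so tests can inspect it later.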
  if (!createdConsumers.containsKey(subscription)) {
    createdConsumers.put(subscription, new ArrayList<>());
  }
  createdConsumers.get(subscription).add(consumer);

  MockConsumerConfiguration configuration = consumerConfigurations.get(subscription);
  if (configuration != null) {
    consumer.updatePartitions(configuration.topic, configuration.partitionInfoList);
    consumer.updateBeginningOffsets(configuration.startOffsets);
    consumer.updateEndOffsets(configuration.endOffsets);
  }
  return consumer;
}
 
Example #13
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testPartitionsFor(TestContext ctx) throws Exception {
  String topicName = "testPartitionsFor";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 2, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.partitionsFor(topicName, ar -> {
    if (ar.succeeded()) {
      List<PartitionInfo> partitionInfo = ar.result();
      ctx.assertEquals(2, partitionInfo.size());
    } else {
      ctx.fail();
    }
    done.complete();
  });
}
 
Example #14
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testConsume(TestContext ctx) throws Exception {
  final String topicName = "testConsume";
  String consumerId = topicName;
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 1000;
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch.awaitSuccess(20000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumer = createConsumer(vertx, config);
  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  consumer.subscribe(Collections.singleton(topicName));
}
 
Example #15
Source File: KafkaMockConsumer.java    From calcite with Apache License 2.0
public KafkaMockConsumer(final OffsetResetStrategy offsetResetStrategy) {
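  // Note: the offsetResetStrategy argument is ignored; this mock is pinned to EARLIEST.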
  super(OffsetResetStrategy.EARLIEST);

  assign(Arrays.asList(new TopicPartition("testtopic", 0)));

  HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new TopicPartition("testtopic", 0), 0L);
  updateBeginningOffsets(beginningOffsets);

  for (int idx = 0; idx < 10; ++idx) {
    addRecord(
        new ConsumerRecord<>("testtopic",
            0, idx,
            ("mykey" + idx).getBytes(StandardCharsets.UTF_8),
            ("myvalue" + idx).getBytes(StandardCharsets.UTF_8)));
  }
}
 
Example #16
Source File: KafkaIOTest.java    From beam with Apache License 2.0
/**
 * Creates a consumer with two topics of 10 partitions each. The numElements records are
 * assigned across all 20 partitions in round-robin order.
 */
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
    int numElements,
    int maxNumRecords,
    @Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {

  List<String> topics = ImmutableList.of("topic_a", "topic_b");

  KafkaIO.Read<Integer, Long> reader =
      KafkaIO.<Integer, Long>read()
          .withBootstrapServers("myServer1:9092,myServer2:9092")
          .withTopics(topics)
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
          .withKeyDeserializer(IntegerDeserializer.class)
          .withValueDeserializer(LongDeserializer.class)
          .withMaxNumRecords(maxNumRecords);

  if (timestampFn != null) {
    return reader.withTimestampFn(timestampFn);
  } else {
    return reader;
  }
}
 
Example #17
Source File: KafkaUsage.java    From smallrye-reactive-messaging with Apache License 2.0
public Properties getConsumerProperties(String groupId, String clientId, OffsetResetStrategy autoOffsetReset) {
    if (groupId == null) {
        throw new IllegalArgumentException("The groupId is required");
    } else {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", brokers);
        props.setProperty("group.id", groupId);
        props.setProperty("enable.auto.commit", Boolean.FALSE.toString());
        if (autoOffsetReset != null) {
            props.setProperty("auto.offset.reset",
                    autoOffsetReset.toString().toLowerCase());
        }

        if (clientId != null) {
            props.setProperty("client.id", clientId);
        }

        return props;
    }
}
 
Example #18
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testReadAvroSpecificRecordsWithConfluentSchemaRegistry() {
  int numElements = 100;
  String topic = "my_topic";
  String schemaRegistryUrl = "mock://my-scope-name";
  String valueSchemaSubject = topic + "-value";

  List<KV<Integer, AvroGeneratedUser>> inputs = new ArrayList<>();
  for (int i = 0; i < numElements; i++) {
    inputs.add(KV.of(i, new AvroGeneratedUser("ValueName" + i, i, "color" + i)));
  }

  KafkaIO.Read<Integer, AvroGeneratedUser> reader =
      KafkaIO.<Integer, AvroGeneratedUser>read()
          .withBootstrapServers("localhost:9092")
          .withTopic(topic)
          .withKeyDeserializer(IntegerDeserializer.class)
          .withValueDeserializer(
              mockDeserializerProvider(schemaRegistryUrl, valueSchemaSubject, null))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  ImmutableList.of(topic),
                  1,
                  numElements,
                  OffsetResetStrategy.EARLIEST,
                  i -> ByteBuffer.wrap(new byte[4]).putInt(i).array(),
                  new ValueAvroSerializableFunction(topic, schemaRegistryUrl)))
          .withMaxNumRecords(numElements);

  PCollection<KV<Integer, AvroGeneratedUser>> input = p.apply(reader.withoutMetadata());

  PAssert.that(input).containsInAnyOrder(inputs);
  p.run();
}
 
Example #19
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testReadAvroGenericRecordsWithConfluentSchemaRegistry() {
  int numElements = 100;
  String topic = "my_topic";
  String schemaRegistryUrl = "mock://my-scope-name";
  String keySchemaSubject = topic + "-key";
  String valueSchemaSubject = topic + "-value";

  List<KV<GenericRecord, GenericRecord>> inputs = new ArrayList<>();
  for (int i = 0; i < numElements; i++) {
    inputs.add(
        KV.of(
            new AvroGeneratedUser("KeyName" + i, i, "color" + i),
            new AvroGeneratedUser("ValueName" + i, i, "color" + i)));
  }

  KafkaIO.Read<GenericRecord, GenericRecord> reader =
      KafkaIO.<GenericRecord, GenericRecord>read()
          .withBootstrapServers("localhost:9092")
          .withTopic(topic)
          .withKeyDeserializer(
              mockDeserializerProvider(schemaRegistryUrl, keySchemaSubject, null))
          .withValueDeserializer(
              mockDeserializerProvider(schemaRegistryUrl, valueSchemaSubject, null))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  ImmutableList.of(topic),
                  1,
                  numElements,
                  OffsetResetStrategy.EARLIEST,
                  new KeyAvroSerializableFunction(topic, schemaRegistryUrl),
                  new ValueAvroSerializableFunction(topic, schemaRegistryUrl)))
          .withMaxNumRecords(numElements);

  PCollection<KV<GenericRecord, GenericRecord>> input = p.apply(reader.withoutMetadata());

  PAssert.that(input).containsInAnyOrder(inputs);
  p.run();
}
 
Example #20
Source File: KafkaReadStreamMockTest.java    From vertx-kafka-client with Apache License 2.0
private MockConsumer<String, String> createMockConsumer(){
    MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

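    // Even with EARLIEST reset, a MockConsumer needs explicit beginning offsets
    // before a poll or seek-to-beginning will return anything.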
    Map<org.apache.kafka.common.TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new org.apache.kafka.common.TopicPartition(TOPIC, 0), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    return consumer;
}
 
Example #21
Source File: KafkaClientProperties.java    From strimzi-kafka-operator with Apache License 2.0
@SuppressWarnings("Regexp") // for the `.toLowerCase()` because kafka needs this property as lower-case
@SuppressFBWarnings("DM_CONVERT_CASE")
public KafkaClientPropertiesBuilder withAutoOffsetResetConfig(OffsetResetStrategy offsetResetConfig) {

    this.properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetResetConfig.name().toLowerCase());
    return this;
}
 
Example #22
Source File: BasicExternalKafkaClient.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Receives messages from the external entrypoint of the cluster using the PLAINTEXT security protocol.
 * @return received message count
 */
public int receiveMessagesPlain(long timeoutMs) {

    String clientName = "receiver-plain-" + clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeyDeserializerConfig(StringDeserializer.class)
            .withValueDeserializerConfig(StringDeserializer.class)
            .withClientIdConfig("consumer-plain-" + new Random().nextInt(Integer.MAX_VALUE))
            .withAutoOffsetResetConfig(OffsetResetStrategy.EARLIEST)
            .withGroupIdConfig(consumerGroup)
            .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
            .withSharedProperties()
            .build();
    }

    try (Consumer plainConsumer = new Consumer(properties, resultPromise, msgCntPredicate, this.topicName, clientName)) {

        plainConsumer.getVertx().deployVerticle(plainConsumer);

        return plainConsumer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
 
Example #23
Source File: OauthExternalKafkaClient.java    From strimzi-kafka-operator with Apache License 2.0
@Override
public int receiveMessagesPlain(long timeoutMs) {
    String clientName = "receiver-plain-" + clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withGroupIdConfig(consumerGroup)
            .withSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeyDeserializerConfig(StringDeserializer.class)
            .withValueDeserializerConfig(StringDeserializer.class)
            .withClientIdConfig(kafkaUsername + "-consumer")
            .withAutoOffsetResetConfig(OffsetResetStrategy.EARLIEST)
            .withSaslMechanism(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)
            .withSaslLoginCallbackHandlerClass()
            .withSharedProperties()
            .withSaslJassConfig(this.clientId, this.clientSecretName, this.oauthTokenEndpointUri)
            .build();
    }

    try (Consumer plainConsumer = new Consumer(properties, resultPromise, msgCntPredicate, topicName, clientName)) {

        plainConsumer.getVertx().deployVerticle(plainConsumer);

        return plainConsumer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
 
Example #24
Source File: KafkaIOTest.java    From beam with Apache License 2.0
ConsumerFactoryFn(
    List<String> topics,
    int partitionsPerTopic,
    int numElements,
    OffsetResetStrategy offsetResetStrategy,
    SerializableFunction<Integer, byte[]> keyFunction,
    SerializableFunction<Integer, byte[]> valueFunction) {
  this.topics = topics;
  this.partitionsPerTopic = partitionsPerTopic;
  this.numElements = numElements;
  this.offsetResetStrategy = offsetResetStrategy;
  this.keyFunction = keyFunction;
  this.valueFunction = valueFunction;
}
 
Example #25
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testCommitAfterPoll(TestContext ctx) throws Exception {

  String topicName = "testCommitAfterPoll";
  String consumerId = topicName;
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 10;
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch.awaitSuccess(10000);

  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumer = createConsumer(vertx, config);
  consumer.exceptionHandler(ctx::fail);

  Async subscribe = ctx.async();
  consumer.subscribe(Collections.singleton(topicName), ar1 -> {
    subscribe.complete();
  });
  subscribe.await();

  Async consume = ctx.async();
  consumer.poll(Duration.ofSeconds(10), rec -> {
    if (rec.result().count() == 10) {
      consume.countDown();
    }
  });
  consume.await();

  Async committed = ctx.async();
  TopicPartition the_topic = new TopicPartition(topicName, 0);
  consumer.commit(Collections.singletonMap(the_topic, new OffsetAndMetadata(10)), ar2 -> {
    committed.complete();
  });
  committed.await();
}
 
Example #26
Source File: HelloConsumer.java    From talk-kafka-zipkin with MIT License
public static void main(String[] args) {

		final var config = ConfigFactory.load();
		/* START TRACING INSTRUMENTATION */
		final var sender = URLConnectionSender.newBuilder()
				.endpoint(config.getString("zipkin.endpoint")).build();
		final var reporter = AsyncReporter.builder(sender).build();
		final var tracing = Tracing.newBuilder().localServiceName("hello-consumer")
				.sampler(Sampler.ALWAYS_SAMPLE).spanReporter(reporter).build();
		final var kafkaTracing = KafkaTracing.newBuilder(tracing)
				.remoteServiceName("kafka").build();
		/* END TRACING INSTRUMENTATION */

		final var consumerConfigs = new Properties();
		consumerConfigs.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
				config.getString("kafka.bootstrap-servers"));
		consumerConfigs.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
				StringDeserializer.class.getName());
		consumerConfigs.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
				StringDeserializer.class.getName());
		consumerConfigs.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "hello-consumer");
		consumerConfigs.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
				OffsetResetStrategy.EARLIEST.name().toLowerCase());
		final var kafkaConsumer = new KafkaConsumer<String, String>(consumerConfigs);
		final var tracingConsumer = kafkaTracing.consumer(kafkaConsumer);

		tracingConsumer.subscribe(Collections.singletonList("hello"));

		while (!Thread.interrupted()) {
			var records = tracingConsumer.poll(Duration.ofMillis(Long.MAX_VALUE));
			for (var record : records) {
				brave.Span span = kafkaTracing.nextSpan(record).name("print-hello")
						.start();
				span.annotate("starting printing");
				out.println(String.format("Record: %s", record));
				span.annotate("printing finished");
				span.finish();
			}
		}
	}
 
Example #27
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testPollExceptionHandler(TestContext ctx) throws Exception {
  Properties config = kafkaCluster.useTo().getConsumerProperties("someRandomGroup", "someRandomClientID", OffsetResetStrategy.EARLIEST);
  config.remove("group.id");
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumer = createConsumer(vertx, config);
  Async done = ctx.async();
  consumer.exceptionHandler(ex -> {
    ctx.assertTrue(ex instanceof InvalidGroupIdException);
    done.complete();
  });
  consumer.handler(System.out::println).subscribe(Collections.singleton("someTopic"));
}
 
Example #28
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testConsumerBatchHandler(TestContext ctx) throws Exception {
  String topicName = "testConsumerBatchHandler";
  String consumerId = topicName;
  Async batch1 = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 500;
  kafkaCluster.useTo().produceStrings(numMessages, batch1::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch1.awaitSuccess(10000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  KafkaConsumer<Object, Object> wrappedConsumer = KafkaConsumer.create(vertx, config);
  wrappedConsumer.exceptionHandler(ctx::fail);
  AtomicInteger count = new AtomicInteger(numMessages);
  Async batchHandler = ctx.async();
  batchHandler.handler(ar -> wrappedConsumer.close());
  wrappedConsumer.batchHandler(records -> {
    ctx.assertEquals(numMessages, records.size());
    for (int i = 0; i < records.size(); i++) {
      KafkaConsumerRecord<Object, Object> record = records.recordAt(i);
      int dec = count.decrementAndGet();
      if (dec >= 0) {
        ctx.assertEquals("key-" + (numMessages - dec - 1), record.key());
      } else {
        ctx.assertEquals("key-" + (-1 - dec), record.key());
      }
    }
    batchHandler.complete();
  });
  wrappedConsumer.handler(rec -> {});
  wrappedConsumer.subscribe(Collections.singleton(topicName));
}
 
Example #29
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
private void testSeek(String topic, int numMessages, TestContext ctx, Runnable seeker, int abc) throws Exception {
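  // "abc" is the remaining-message count at which the test completes; it goes
  // negative once records are redelivered after the seeker rewinds the consumer.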
  kafkaCluster.createTopic(topic, 1, 1);
  String consumerId = topic;
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);
  Async batch1 = ctx.async();
  AtomicInteger index = new AtomicInteger();
  kafkaCluster.useTo().produceStrings(numMessages, batch1::complete, () ->
    new ProducerRecord<>(topic, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch1.awaitSuccess(10000);
  AtomicInteger count = new AtomicInteger(numMessages);
  Async done = ctx.async();
  consumer.handler(record -> {
    int dec = count.decrementAndGet();
    if (dec >= 0) {
      ctx.assertEquals("key-" + (numMessages - dec - 1), record.key());
    } else {
      ctx.assertEquals("key-" + (-1 - dec), record.key());
    }
    if (dec == 0) {
      seeker.run();
    }
    if (dec == abc) {
      done.complete();
    }
  });
  consumer.subscribe(Collections.singleton(topic));
}
 
Example #30
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testListTopics(TestContext ctx) throws Exception {
  String topicName = "testListTopics";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 1, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.handler(record -> {
    // no need to handle incoming records in this test
  });

  consumer.subscribe(Collections.singleton(topicName), asyncResult -> {

    if (asyncResult.succeeded()) {

      consumer.listTopics(asyncResult1 -> {

        if (asyncResult1.succeeded()) {

          ctx.assertTrue(asyncResult1.result().containsKey(topicName));
          done.complete();

        } else {
          ctx.fail();
        }
      });

    } else {
      ctx.fail();
    }

  });
}