Java Code Examples for org.apache.kafka.clients.consumer.OffsetResetStrategy

The following examples show how to use org.apache.kafka.clients.consumer.OffsetResetStrategy. These examples are extracted from open source projects; links to the original project and source file are given with each example.
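OffsetResetStrategy is an enum with the constants EARLIEST, LATEST, and NONE. In most of the examples below it is used to derive the consumer's auto.offset.reset configuration value by lower-casing the constant name. A minimal sketch of that pattern (the bootstrap server and group id are placeholders, not taken from any of the projects below):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

public static Properties consumerProperties(OffsetResetStrategy reset) {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
    if (reset != null) {
        // EARLIEST -> "earliest", LATEST -> "latest", NONE -> "none"
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, reset.name().toLowerCase());
    }
    return props;
}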
Example 1
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java   License: Apache License 2.0
public Properties getConsumerProperties(String groupId, String clientId, OffsetResetStrategy autoOffsetReset) {
    if (groupId == null) {
        throw new IllegalArgumentException("The groupId is required");
    } else {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", brokers);
        props.setProperty("group.id", groupId);
        props.setProperty("enable.auto.commit", Boolean.FALSE.toString());
        if (autoOffsetReset != null) {
            props.setProperty("auto.offset.reset",
                    autoOffsetReset.toString().toLowerCase());
        }

        if (clientId != null) {
            props.setProperty("client.id", clientId);
        }

        return props;
    }
}
 
Example 2
@Override
public Consumer<String, ByteBuffer> createConsumer(String subscription) {
  MockConsumer<String, ByteBuffer> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  if (!createdConsumers.containsKey(subscription)) {
    createdConsumers.put(subscription, new ArrayList<>());
  }
  createdConsumers.get(subscription).add(consumer);

  MockConsumerConfiguration configuration = consumerConfigurations.get(subscription);
  if (configuration != null) {
    consumer.updatePartitions(configuration.topic, configuration.partitionInfoList);
    consumer.updateBeginningOffsets(configuration.startOffsets);
    consumer.updateEndOffsets(configuration.endOffsets);
  }
  return consumer;
}
 
Example 3
@Test
public void shouldApplyTopicRenameWhenCheckingHealth() {
  MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  List<PartitionInfo> partitionInfoList =
      Arrays.asList(
          new PartitionInfo("replace1", 0, null, null, null),
          new PartitionInfo("replace1", 1, null, null, null));
  consumer.updatePartitions("replace1", partitionInfoList);

  SourcePartitionValidator sourcePartitionHealthChecker =
      new SourcePartitionValidator(
          consumer,
          SourcePartitionValidator.MatchingStrategy.TOPIC,
          t -> t.equals("topic1") ? "replace1" : t);
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 0)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 2)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic2", 0)), is(false));
}
 
Example 4
Source Project: common-kafka   Source File: ProcessingPartition.java   License: Apache License 2.0
/**
 * Returns the offset that would be used for the partition based on the consumer's configured offset reset strategy
 *
 * @return the offset that would be used for the partition based on the consumer's configured offset reset strategy
 */
protected long getResetOffset() {
    OffsetResetStrategy strategy = config.getOffsetResetStrategy();

    if (strategy == OffsetResetStrategy.EARLIEST) {
        LOGGER.debug("Looking up offset for partition [{}] using earliest reset", topicPartition);
        return getEarliestOffset();
    }
    else if (strategy == OffsetResetStrategy.LATEST) {
        LOGGER.debug("Looking up offset for partition [{}] using latest reset", topicPartition);
        return getLatestOffset();
    }
    else {
        throw new IllegalStateException("Unable to reset partition to previously committed offset as there is no"
                + " offset for partition [" + topicPartition + "] and offset reset strategy [" + strategy + "] is unknown");
    }
}
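The getEarliestOffset() and getLatestOffset() helpers called above are not shown on this page. With the plain consumer API, equivalent lookups can be done with Consumer#beginningOffsets and Consumer#endOffsets; the following is only a sketch assuming a consumer field and the same topicPartition field, not the actual common-kafka implementation:

// Hypothetical helpers, assuming a Consumer<?, ?> field named "consumer" and
// java.util.Collections on the classpath; beginningOffsets/endOffsets are
// standard org.apache.kafka.clients.consumer.Consumer methods.
protected long getEarliestOffset() {
    return consumer.beginningOffsets(Collections.singleton(topicPartition)).get(topicPartition);
}

protected long getLatestOffset() {
    return consumer.endOffsets(Collections.singleton(topicPartition)).get(topicPartition);
}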
 
Example 5
Source Project: common-kafka   Source File: ProcessingPartitionTest.java   License: Apache License 2.0
@Before
public void before() {
    properties = new Properties();
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, OffsetResetStrategy.EARLIEST.toString().toLowerCase());

    config = new ProcessingConfig(properties);
    topicPartition = new TopicPartition("topic", 1);

    when(consumer.committed(topicPartition)).thenReturn(new OffsetAndMetadata(0L));

    partition = new MockProcessingPartition<>(topicPartition, config, consumer);

    logAppender = new TestLogAppender();
    RootLogger.getRootLogger().addAppender(logAppender);
}
 
Example 6
Source Project: common-kafka   Source File: ProcessingConfigTest.java   License: Apache License 2.0
@Test
public void constructor_defaults() throws IOException {
    assertTrue(config.getCommitInitialOffset());
    assertThat(config.getCommitSizeThreshold(), is(Long.parseLong(ProcessingConfig.COMMIT_SIZE_THRESHOLD_DEFAULT)));
    assertThat(config.getCommitTimeThreshold(), is(Long.parseLong(ProcessingConfig.COMMIT_TIME_THRESHOLD_DEFAULT)));
    assertThat(config.getFailPauseTime(), is(Long.parseLong(ProcessingConfig.FAIL_PAUSE_TIME_DEFAULT)));
    assertThat(config.getFailSampleSize(), is(Integer.parseInt(ProcessingConfig.FAIL_SAMPLE_SIZE_DEFAULT)));
    assertThat(config.getFailThreshold(), is(Double.parseDouble(ProcessingConfig.FAIL_THRESHOLD_DEFAULT)));
    assertThat(config.getOffsetResetStrategy(), is(OffsetResetStrategy.EARLIEST));
    assertThat(config.getMaxPollInterval(), is(300000L));

    // config properties should at least contain everything from properties
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        assertThat(config.getProperties().get(entry.getKey()), is(entry.getValue()));
    }

    assertThat(config.getProperties().getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG), is(Boolean.FALSE.toString()));
}
 
Example 7
Source Project: vertx-kafka-client   Source File: AdminClientTest.java   License: Apache License 2.0
@Test
public void testListConsumerGroups(TestContext ctx) {

  KafkaAdminClient adminClient = KafkaAdminClient.create(this.vertx, config);

  Async async = ctx.async();

  //kafkaCluster.useTo().consumeStrings(() -> true, null, Collections.singletonList("first-topic"), c -> { });

  kafkaCluster.useTo().consume("groupId", "clientId", OffsetResetStrategy.EARLIEST,
    new StringDeserializer(), new StringDeserializer(), () -> true, null, null,
    Collections.singleton("first-topic"), c -> { });

  // Use a timer because the Kafka cluster takes time to start the consumer
  vertx.setTimer(1000, t -> {

    adminClient.listConsumerGroups(ctx.asyncAssertSuccess(groups -> {

      ctx.assertTrue(groups.size() > 0);
      ctx.assertTrue(groups.stream().map(ConsumerGroupListing::getGroupId).anyMatch(g -> g.equals("groupId")));
      adminClient.close();
      async.complete();
    }));

  });
}
 
Example 8
Source Project: vertx-kafka-client   Source File: ConsumerTestBase.java   License: Apache License 2.0
@Test
public void testConsume(TestContext ctx) throws Exception {
  final String topicName = "testConsume";
  String consumerId = topicName;
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 1000;
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch.awaitSuccess(20000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  consumer = createConsumer(vertx, config);
  Async done = ctx.async();
  AtomicInteger count = new AtomicInteger(numMessages);
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {
    if (count.decrementAndGet() == 0) {
      done.complete();
    }
  });
  consumer.subscribe(Collections.singleton(topicName));
}
 
Example 9
Source Project: calcite   Source File: KafkaMockConsumer.java   License: Apache License 2.0
public KafkaMockConsumer(final OffsetResetStrategy offsetResetStrategy) {
  super(OffsetResetStrategy.EARLIEST);

  assign(Arrays.asList(new TopicPartition("testtopic", 0)));

  HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new TopicPartition("testtopic", 0), 0L);
  updateBeginningOffsets(beginningOffsets);

  for (int idx = 0; idx < 10; ++idx) {
    addRecord(
        new ConsumerRecord<>("testtopic",
            0, idx,
            ("mykey" + idx).getBytes(StandardCharsets.UTF_8),
            ("myvalue" + idx).getBytes(StandardCharsets.UTF_8)));
  }
}
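Note that this constructor ignores its offsetResetStrategy argument and always passes OffsetResetStrategy.EARLIEST to the superclass. Assuming KafkaMockConsumer extends MockConsumer<byte[], byte[]> (implied by the byte[] records it adds), a single poll after construction returns the ten pre-loaded records; a minimal usage sketch, not taken from the Calcite sources:

// The mock is already assigned to testtopic-0 with a beginning offset of 0,
// so poll() resets to the earliest offset and hands back the added records.
KafkaMockConsumer consumer = new KafkaMockConsumer(OffsetResetStrategy.EARLIEST);
ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<byte[], byte[]> record : records) {
    System.out.println(new String(record.key(), StandardCharsets.UTF_8)
        + " -> " + new String(record.value(), StandardCharsets.UTF_8));
}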
 
Example 10
Source Project: vertx-kafka-client   Source File: ConsumerTestBase.java   License: Apache License 2.0
@Test
public void testPartitionsFor(TestContext ctx) throws Exception {
  String topicName = "testPartitionsFor";
  String consumerId = topicName;
  kafkaCluster.createTopic(topicName, 2, 1);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);

  Async done = ctx.async();

  consumer.partitionsFor(topicName, ar -> {
    if (ar.succeeded()) {
      List<PartitionInfo> partitionInfo = ar.result();
      ctx.assertEquals(2, partitionInfo.size());
    } else {
      ctx.fail();
    }
    done.complete();
  });
}
 
Example 11
Source Project: beam   Source File: KafkaIOTest.java   License: Apache License 2.0
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
  KafkaIO.Read<byte[], byte[]> read =
      KafkaIO.readBytes()
          .withBootstrapServers("myServer1:9092,myServer2:9092")
          .withTopicPartitions(
              ImmutableList.of(new TopicPartition("test", 5), new TopicPartition("test", 6)))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  Lists.newArrayList("test"),
                  10,
                  10,
                  OffsetResetStrategy.EARLIEST)); // 10 partitions

  DisplayData displayData = DisplayData.from(read);

  assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
  assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
  assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
  assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
  assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
 
Example 12
Source Project: vertx-kafka-client   Source File: ConsumerTestBase.java   License: Apache License 2.0
@Test
public void testBatchHandler(TestContext ctx) throws Exception {
  String topicName = "testBatchHandler";
  String consumerId = topicName;
  Async batch1 = ctx.async();
  AtomicInteger index = new AtomicInteger();
  int numMessages = 500;
  kafkaCluster.useTo().produceStrings(numMessages, batch1::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.getAndIncrement()));
  batch1.awaitSuccess(10000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Context context = vertx.getOrCreateContext();
  consumer = createConsumer(context, config);
  Async batchHandler = ctx.async();
  consumer.batchHandler(records -> {
    ctx.assertEquals(numMessages, records.count());
    batchHandler.complete();
  });
  consumer.exceptionHandler(ctx::fail);
  consumer.handler(rec -> {});
  consumer.subscribe(Collections.singleton(topicName));
}
 
Example 13
Source Project: vertx-kafka-client   Source File: ConsumerTestBase.java   License: Apache License 2.0
@Test
public void testPollTimeout(TestContext ctx) throws Exception {
  Async async = ctx.async();
  String topicName = "testPollTimeout";
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0);
  KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config);

  int pollingTimeout = 1500;
  // Set the polling timeout to 1500 ms (default is 1000)
  consumerWithCustomTimeout.pollTimeout(Duration.ofMillis(pollingTimeout));
  // Subscribe to the empty topic (we want the poll() call to time out!)
  consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> {
    consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once
    long beforeSeek = System.currentTimeMillis();
    consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> {
      long durationWShortTimeout = System.currentTimeMillis() - beforeSeek;
      ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout");
      consumerWithCustomTimeout.close();
      async.countDown();
    });
  });
}
 
Example 14
Source Project: vertx-kafka-client   Source File: ConsumerMockTestBase.java   License: Apache License 2.0
@Test
public void testConsume(TestContext ctx) throws Exception {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, "abc", "def"));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
 
Example 15
Source Project: vertx-kafka-client   Source File: ConsumerMockTestBase.java   License: Apache License 2.0
@Test
public void testConsumeWithHeader(TestContext ctx) {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    Header[] headers = record.headers().toArray();
    ctx.assertEquals(1, headers.length);
    Header header = headers[0];
    ctx.assertEquals("header_key", header.key());
    ctx.assertEquals("header_value", new String(header.value()));
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0L, 0, 0, "abc", "def",
        new RecordHeaders(Collections.singletonList(new RecordHeader("header_key", "header_value".getBytes())))));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
 
Example 16
Source Project: beam   Source File: KafkaIOTest.java   License: Apache License 2.0
/**
 * Creates a consumer with two topics, with 10 partitions each. numElements are assigned
 * round-robin across all 20 partitions.
 */
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
    int numElements,
    int maxNumRecords,
    @Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {

  List<String> topics = ImmutableList.of("topic_a", "topic_b");

  KafkaIO.Read<Integer, Long> reader =
      KafkaIO.<Integer, Long>read()
          .withBootstrapServers("myServer1:9092,myServer2:9092")
          .withTopics(topics)
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
          .withKeyDeserializer(IntegerDeserializer.class)
          .withValueDeserializer(LongDeserializer.class)
          .withMaxNumRecords(maxNumRecords);

  if (timestampFn != null) {
    return reader.withTimestampFn(timestampFn);
  } else {
    return reader;
  }
}
 
Example 17
Source Project: beam   Source File: KafkaIOTest.java   License: Apache License 2.0
@Test
public void testUnboundedSourceWithExplicitPartitions() {
  int numElements = 1000;

  List<String> topics = ImmutableList.of("test");

  KafkaIO.Read<byte[], Long> reader =
      KafkaIO.<byte[], Long>read()
          .withBootstrapServers("none")
          .withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
          .withConsumerFactoryFn(
              new ConsumerFactoryFn(
                  topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
          .withKeyDeserializer(ByteArrayDeserializer.class)
          .withValueDeserializer(LongDeserializer.class)
          .withMaxNumRecords(numElements / 10);

  PCollection<Long> input = p.apply(reader.withoutMetadata()).apply(Values.create());

  // assert that every element is a multiple of 5.
  PAssert.that(input).satisfies(new AssertMultipleOf(5));

  PAssert.thatSingleton(input.apply(Count.globally())).isEqualTo(numElements / 10L);

  p.run();
}
 
Example 18
Source Project: talk-kafka-zipkin   Source File: HelloConsumer.java   License: MIT License
public static void main(String[] args) {

    final var config = ConfigFactory.load();
    /* START TRACING INSTRUMENTATION */
    final var sender = URLConnectionSender.newBuilder()
            .endpoint(config.getString("zipkin.endpoint")).build();
    final var reporter = AsyncReporter.builder(sender).build();
    final var tracing = Tracing.newBuilder().localServiceName("hello-consumer")
            .sampler(Sampler.ALWAYS_SAMPLE).spanReporter(reporter).build();
    final var kafkaTracing = KafkaTracing.newBuilder(tracing)
            .remoteServiceName("kafka").build();
    /* END TRACING INSTRUMENTATION */

    final var consumerConfigs = new Properties();
    consumerConfigs.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
            config.getString("kafka.bootstrap-servers"));
    consumerConfigs.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    consumerConfigs.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    consumerConfigs.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "hello-consumer");
    consumerConfigs.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
            OffsetResetStrategy.EARLIEST.name().toLowerCase());
    final var kafkaConsumer = new KafkaConsumer<String, String>(consumerConfigs);
    final var tracingConsumer = kafkaTracing.consumer(kafkaConsumer);

    tracingConsumer.subscribe(Collections.singletonList("hello"));

    while (!Thread.interrupted()) {
        var records = tracingConsumer.poll(Duration.ofMillis(Long.MAX_VALUE));
        for (var record : records) {
            brave.Span span = kafkaTracing.nextSpan(record).name("print-hello")
                    .start();
            span.annotate("starting printing");
            out.println(String.format("Record: %s", record));
            span.annotate("printing finished");
            span.finish();
        }
    }
}
 
Example 19
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java   License: Apache License 2.0
/**
 * Use the supplied function to asynchronously consume messages from the cluster.
 *
 * @param groupId the name of the group; may not be null
 * @param clientId the name of the client; may not be null
 * @param autoOffsetReset how to pick a starting offset when there is no initial offset in ZooKeeper or if an offset is
 *        out of range; may be null for the default to be used
 * @param keyDeserializer the deserializer for the keys; may not be null
 * @param valueDeserializer the deserializer for the values; may not be null
 * @param continuation the function that determines if the consumer should continue; may not be null
 * @param offsetCommitCallback the callback that should be used after committing offsets; may be null if offsets are
 *        not to be committed
 * @param completion the function to call when the consumer terminates; may be null
 * @param topics the set of topics to consume; may not be null or empty
 * @param consumerFunction the function to consume the messages; may not be null
 */
public <K, V> void consume(String groupId, String clientId, OffsetResetStrategy autoOffsetReset,
        Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer,
        BooleanSupplier continuation, OffsetCommitCallback offsetCommitCallback, Runnable completion,
        Collection<String> topics,
        java.util.function.Consumer<ConsumerRecord<K, V>> consumerFunction) {
    Properties props = getConsumerProperties(groupId, clientId, autoOffsetReset);
    Thread t = new Thread(() -> {
        LOGGER.infof("Starting consumer %s to read messages", clientId);
        try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(props, keyDeserializer, valueDeserializer)) {
            consumer.subscribe(new ArrayList<>(topics));
            while (continuation.getAsBoolean()) {
                consumer.poll(Duration.ofMillis(10)).forEach(record -> {
                    LOGGER.infof("Consumer %s: consuming message %s", clientId, record);
                    consumerFunction.accept(record);
                    if (offsetCommitCallback != null) {
                        consumer.commitAsync(offsetCommitCallback);
                    }
                });
            }
        } finally {
            if (completion != null) {
                completion.run();
            }
            LOGGER.debugf("Stopping consumer %s", clientId);
        }
    });
    t.setName(clientId + "-thread");
    t.start();
}
 
Example 20
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java   License: Apache License 2.0
private void consumeStrings(BooleanSupplier continuation, Runnable completion, Collection<String> topics,
        Consumer<ConsumerRecord<String, String>> consumerFunction) {
    Deserializer<String> keyDes = new StringDeserializer();
    String randomId = UUID.randomUUID().toString();
    this.consume(randomId, randomId, OffsetResetStrategy.EARLIEST, keyDes, keyDes, continuation, null,
            completion, topics, consumerFunction);
}
 
Example 21
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java   License: Apache License 2.0
private void consumeIntegers(BooleanSupplier continuation, Runnable completion, Collection<String> topics,
        Consumer<ConsumerRecord<String, Integer>> consumerFunction) {
    Deserializer<String> keyDes = new StringDeserializer();
    Deserializer<Integer> valDes = new IntegerDeserializer();
    String randomId = UUID.randomUUID().toString();
    this.consume(randomId, randomId, OffsetResetStrategy.EARLIEST, keyDes, valDes, continuation, null,
            completion, topics, consumerFunction);
}
 
Example 22
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java   License: Apache License 2.0
private void consumeDoubles(BooleanSupplier continuation, Runnable completion, Collection<String> topics,
        Consumer<ConsumerRecord<String, Double>> consumerFunction) {
    Deserializer<String> keyDes = new StringDeserializer();
    Deserializer<Double> valDes = new DoubleDeserializer();
    String randomId = UUID.randomUUID().toString();
    this.consume(randomId, randomId, OffsetResetStrategy.EARLIEST, keyDes, valDes, continuation, null,
            completion, topics, consumerFunction);
}
 
Example 23
@Test
public void testDeadLetterQueueStrategyWithDefaultTopic() {
    KafkaUsage usage = new KafkaUsage();
    List<ConsumerRecord<String, Integer>> records = new CopyOnWriteArrayList<>();
    String randomId = UUID.randomUUID().toString();

    usage.consume(randomId, randomId, OffsetResetStrategy.EARLIEST,
            new StringDeserializer(), new IntegerDeserializer(), () -> records.size() < 3, null, null,
            Collections.singletonList("dead-letter-topic-kafka"), records::add);

    addConfig(getDeadLetterQueueConfig());
    container = baseWeld().addBeanClass(MyReceiverBean.class).initialize();
    AtomicInteger counter = new AtomicInteger();
    new Thread(() -> usage.produceIntegers(10, null,
            () -> new ProducerRecord<>("dead-letter-default", counter.getAndIncrement()))).start();

    MyReceiverBean bean = container.getBeanManager().createInstance().select(MyReceiverBean.class).get();
    await().atMost(2, TimeUnit.MINUTES).until(() -> bean.list().size() >= 10);
    assertThat(bean.list()).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    await().atMost(2, TimeUnit.MINUTES).until(() -> records.size() == 3);
    assertThat(records).allSatisfy(r -> {
        assertThat(r.topic()).isEqualTo("dead-letter-topic-kafka");
        assertThat(r.value()).isIn(3, 6, 9);
        assertThat(new String(r.headers().lastHeader("dead-letter-reason").value())).startsWith("nack 3 -");
        assertThat(r.headers().lastHeader("dead-letter-cause")).isNull();
    });
}
 
Example 24
@Test
public void testDeadLetterQueueStrategyWithCustomConfig() {
    KafkaUsage usage = new KafkaUsage();
    List<ConsumerRecord<String, Integer>> records = new CopyOnWriteArrayList<>();
    String randomId = UUID.randomUUID().toString();

    usage.consume(randomId, randomId, OffsetResetStrategy.EARLIEST,
            new StringDeserializer(), new IntegerDeserializer(), () -> records.size() < 3, null, null,
            Collections.singletonList("missed"), records::add);

    addConfig(getDeadLetterQueueWithCustomConfig());
    container = baseWeld().addBeanClass(MyReceiverBean.class).initialize();
    AtomicInteger counter = new AtomicInteger();
    new Thread(() -> usage.produceIntegers(10, null,
            () -> new ProducerRecord<>("dead-letter-custom", counter.getAndIncrement()))).start();

    MyReceiverBean bean = container.getBeanManager().createInstance().select(MyReceiverBean.class).get();
    await().atMost(2, TimeUnit.MINUTES).until(() -> bean.list().size() >= 10);
    assertThat(bean.list()).containsExactly(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    await().atMost(2, TimeUnit.MINUTES).until(() -> records.size() == 3);
    assertThat(records).allSatisfy(r -> {
        assertThat(r.topic()).isEqualTo("missed");
        assertThat(r.value()).isIn(3, 6, 9);
        assertThat(new String(r.headers().lastHeader("dead-letter-reason").value())).startsWith("nack 3 -");
        assertThat(r.headers().lastHeader("dead-letter-cause")).isNull();
    });
}
 
Example 25
private MockConsumer<byte[], byte[]> mockSourceConsumer() {
  MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockConsumer, "topic1", 2);
  updateMockPartitions(mockConsumer, "topic2", 1);
  updateMockPartitions(mockConsumer, "topic3", 1);
  updateMockPartitions(mockConsumer, "topic4", 1);
  updateMockPartitions(mockConsumer, "topic5", 1);
  updateMockPartitions(mockConsumer, "reroute.outgoing", 1);
  return mockConsumer;
}
 
Example 26
private MockConsumer<byte[], byte[]> mockDestinationConsumer() {
  // Topic 5 is NOT present in destination
  MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockConsumer, "topic1", 2);
  updateMockPartitions(mockConsumer, "topic2", 1);
  updateMockPartitions(mockConsumer, "topic3", 1);
  updateMockPartitions(mockConsumer, "topic4", 1);
  updateMockPartitions(mockConsumer, "reroute.incoming", 1);
  return mockConsumer;
}
 
Example 27
@Test
public void shouldApplyTopicRenameTransforms() {
  Map<String, String> properties = getBaseProperties();
  properties.put(SourceConfigDefinition.TOPICS_REGEX.getKey(), "reroute.*");
  properties.put("transforms", "reroute");
  properties.put("transforms.reroute.type", "org.apache.kafka.connect.transforms.RegexRouter");
  properties.put("transforms.reroute.regex", "^reroute\\.outgoing$");
  properties.put("transforms.reroute.replacement", "reroute.incoming");
  SourceConfig config = new SourceConfig(properties);

  MockConsumer<byte[], byte[]> mockSource = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockSource, "reroute.outgoing", 1);

  MockConsumer<byte[], byte[]> mockDest = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockDest, "reroute.incoming", 1);

  TaskConfigBuilder taskConfigBuilder =
      new TaskConfigBuilder(new RoundRobinTaskAssignor(), config);
  KafkaMonitor monitor =
      new KafkaMonitor(
          mock(ConnectorContext.class), config, mockSource, mockDest, taskConfigBuilder);

  monitor.partitionsChanged();
  List<Map<String, String>> result = monitor.taskConfigs(3);

  List<TopicPartition> partitions = assignedTopicPartitionsFromTaskConfigs(result);
  assertThat(partitions, contains(new TopicPartition("reroute.outgoing", 0)));
}
 
Example 28
@Test
public void shouldContinueRunningWhenExceptionEncountered() throws InterruptedException {
  Map<String, String> properties = getBaseProperties();
  SourceConfig config = new SourceConfig(properties);
  TaskConfigBuilder taskConfigBuilder =
      new TaskConfigBuilder(new RoundRobinTaskAssignor(), config);

  // Require two thrown exceptions to ensure that the KafkaMonitor run loop executes more than
  // once
  CountDownLatch exceptionThrownLatch = new CountDownLatch(2);
  MockConsumer<byte[], byte[]> consumer =
      new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
          exceptionThrownLatch.countDown();
          throw new TimeoutException("KABOOM!");
        }
      };

  kafkaMonitor =
      new KafkaMonitor(
          mock(ConnectorContext.class),
          config,
          consumer,
          mockDestinationConsumer,
          taskConfigBuilder);
  Thread monitorThread = new Thread(kafkaMonitor);
  monitorThread.start();
  exceptionThrownLatch.await(2, TimeUnit.SECONDS);
  monitorThread.join(1);

  assertThat(monitorThread.getState(), not(State.TERMINATED));
  kafkaMonitor.stop();
  monitorThread.interrupt();
  monitorThread.join(5000);
}
 
Example 29
@Before
public void setUp() {
  this.mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  List<PartitionInfo> partitionInfoList =
      Arrays.asList(
          new PartitionInfo("topic1", 0, null, null, null),
          new PartitionInfo("topic1", 1, null, null, null));
  mockConsumer.updatePartitions("topic1", partitionInfoList);
}
 
Example 30
@Before
public void setUp() {
  mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  mockConsumer.updatePartitions(
      TOPIC,
      Arrays.asList(
          new PartitionInfo(TOPIC, 0, null, null, null),
          new PartitionInfo(TOPIC, 1, null, null, null)));
  mirusSourceTask = new MirusSourceTask(consumerProperties -> mockConsumer);

  // Always return offset = 0
  SourceTaskContext context =
      new SourceTaskContext() {
        @Override
        public Map<String, String> configs() {
          return null;
        }

        @Override
        public OffsetStorageReader offsetStorageReader() {
          return new OffsetStorageReader() {
            @Override
            public <T> Map<String, Object> offset(Map<String, T> partition) {
              return new HashMap<>(MirusSourceTask.offsetMap(0L));
            }

            @Override
            public <T> Map<Map<String, T>, Map<String, Object>> offsets(
                Collection<Map<String, T>> partitions) {
              return partitions.stream().collect(Collectors.toMap(p -> p, this::offset));
            }
          };
        }
      };
  mirusSourceTask.initialize(context);
  mirusSourceTask.start(mockTaskProperties());
}