Java Code Examples for org.apache.kafka.clients.consumer.Consumer

The following examples show how to use org.apache.kafka.clients.consumer.Consumer. They are extracted from open source projects; where available, the source project, source file, and license are listed above each example.
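
Before the project-specific examples, here is a minimal sketch of the typical create/subscribe/poll lifecycle. It is illustrative only: the broker address, group id, and topic name ("localhost:9092", "example-group", "example-topic") are placeholder assumptions, not values from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalConsumerSketch {

    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Declaring the variable as the Consumer interface (rather than KafkaConsumer)
        // is the pattern most of the examples below follow; it lets tests substitute a mock.
        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // assumed topic
            final ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset=%d key=%s value=%s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}
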
Example 1
Source Project: synapse   Source File: KafkaMessageSenderTest.java    License: Apache License 2.0
@Test
public void shouldSendCustomMessageHeaders() {
    // given
    final Message<ExampleJsonObject> message = message(
            "someKey",
            Header.of(of("first", "one", "second", "two")),
            new ExampleJsonObject("banana"));

    try (final Consumer<String, String> consumer = getKafkaConsumer("someTestGroup")) {
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KAFKA_TOPIC);

        // when
        messageSender.send(message).join();

        // then
        final ConsumerRecord<String, String> record = getSingleRecord(consumer, KAFKA_TOPIC, 250L);
        assertThat(record.key(), is("someKey"));
        assertThat(record.value(), is("{\"value\":\"banana\"}"));
        assertThat(record.headers().lastHeader("first").value(), is("one".getBytes()));
        assertThat(record.headers().lastHeader("second").value(), is("two".getBytes()));
    }
}
 
Example 2
Source Project: apicurio-registry   Source File: ConsumerContainer.java    License: Apache License 2.0
public DynamicPool(
        Properties consumerProperties,
        Deserializer<K> keyDeserializer,
        Deserializer<V> valueDeserializer,
        String topic,
        int initialConsumerThreads,
        Oneof2<
                java.util.function.Consumer<? super ConsumerRecord<K, V>>,
                java.util.function.Consumer<? super ConsumerRecords<K, V>>
                > recordOrRecordsHandler,
        BiConsumer<? super Consumer<?, ?>, ? super RuntimeException> consumerExceptionHandler
) {
    this.consumerProperties = Objects.requireNonNull(consumerProperties);
    this.keyDeserializer = Objects.requireNonNull(keyDeserializer);
    this.valueDeserializer = Objects.requireNonNull(valueDeserializer);
    this.topic = Objects.requireNonNull(topic);
    this.recordOrRecordsHandler = Objects.requireNonNull(recordOrRecordsHandler);
    this.consumerExceptionHandler = Objects.requireNonNull(consumerExceptionHandler);
    setConsumerThreads(initialConsumerThreads);
}
 
Example 3
Source Project: synapse   Source File: KafkaStreamsTests.java    License: Apache License 2.0
@Test
public void someOtherTest() throws ExecutionException, InterruptedException {
    Map<String, Object> consumerProps = consumerProps("otherTestGroup", "true", this.embeddedKafka);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    ConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

    Consumer<String, String> consumer = cf.createConsumer();
    this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, STREAMING_TOPIC2);

    template.send(STREAMING_TOPIC2, "someOtherTestMessage", "foo").get();

    ConsumerRecord<String, String> replies = getSingleRecord(consumer, STREAMING_TOPIC2, 250L);
    assertThat(replies.key(), is("someOtherTestMessage"));
}
 
Example 4
Source Project: localization_nifi   Source File: ConsumerLease.java    License: Apache License 2.0
ConsumerLease(
        final long maxWaitMillis,
        final Consumer<byte[], byte[]> kafkaConsumer,
        final byte[] demarcatorBytes,
        final String keyEncoding,
        final String securityProtocol,
        final String bootstrapServers,
        final ComponentLog logger) {
    this.maxWaitMillis = maxWaitMillis;
    this.kafkaConsumer = kafkaConsumer;
    this.demarcatorBytes = demarcatorBytes;
    this.keyEncoding = keyEncoding;
    this.securityProtocol = securityProtocol;
    this.bootstrapServers = bootstrapServers;
    this.logger = logger;
}
 
Example 5
Source Project: rya   Source File: KafkaTestUtil.java    License: Apache License 2.0
/**
 * Polls a {@link Consumer} until it either reaches the target number of results or exhausts the
 * maximum number of poll attempts.
 *
 * @param pollMs - The maximum time each poll may block, in milliseconds.
 * @param pollIterations - The maximum number of polls that will be attempted.
 * @param targetSize - The number of results to read before stopping.
 * @param consumer - The consumer that will be polled.
 * @return The results that were read from the consumer.
 * @throws Exception If the poll failed.
 */
public static <K, V> List<V> pollForResults(
        final int pollMs,
        final int pollIterations,
        final int targetSize,
        final Consumer<K, V> consumer) throws Exception {
    requireNonNull(consumer);

    final List<V> values = new ArrayList<>();

    int i = 0;
    while(values.size() < targetSize && i < pollIterations) {
        for(final ConsumerRecord<K, V> record : consumer.poll(pollMs)) {
            values.add( record.value() );
        }
        i++;
    }

    return values;
}
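
A hedged usage sketch (not from the rya project) of the helper above: assuming a consumer that is already subscribed to the topic under test, and a test method that declares throws Exception, it waits through up to ten 500 ms polls for three values. The names here are hypothetical.

// Hypothetical caller; `consumer` is assumed to be subscribed already.
final List<String> values = KafkaTestUtil.pollForResults(500, 10, 3, consumer);
assertEquals(3, values.size());
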
 
Example 6
Source Project: rya   Source File: KafkaTestInstanceRule.java    License: Apache License 2.0
/**
 * Delete all of the topics that are in the embedded Kafka instance.
 *
 * @throws InterruptedException Interrupted while waiting for the topics to be deleted.
 */
public void deleteAllTopics() throws InterruptedException {
    // Setup the consumer that is used to list topics for the source.
    final Properties consumerProperties = createBootstrapServerConfig();
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    try(final Consumer<String, String> listTopicsConsumer = new KafkaConsumer<>(consumerProperties)) {
        // Mark all existing topics for deletion.
        Set<String> topics = listTopicsConsumer.listTopics().keySet();
        for(final String topic : topics) {
            deleteTopic(topic);
        }

        // Loop and wait until they are all gone.
        while(!topics.isEmpty()) {
            Thread.sleep(100);
            topics = listTopicsConsumer.listTopics().keySet();
        }
    }
}
 
Example 7
Source Project: brave   Source File: TracingConsumerTest.java    License: Apache License 2.0
@Test
public void should_call_wrapped_poll_and_close_spans() {
  consumer.addRecord(consumerRecord);
  Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);
  tracingConsumer.poll(10);

  // offset changed
  assertThat(consumer.position(topicPartition)).isEqualTo(2L);

  MutableSpan consumerSpan = spans.get(0);
  assertThat(consumerSpan.kind()).isEqualTo(CONSUMER);
  assertThat(consumerSpan.name()).isEqualTo("poll");
  assertThat(consumerSpan.tags())
    .containsOnly(entry("kafka.topic", "myTopic"));
}
 
Example 8
Source Project: kbear   Source File: ConsumerTest.java    License: Apache License 2.0
protected void commitSync(
        java.util.function.BiConsumer<Consumer<String, String>, Map<TopicPartition, OffsetAndMetadata>> committer)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);
        OffsetAndMetadata committed2 = new OffsetAndMetadata(committed.offset() + _messageCount,
                committed.metadata());
        System.out.println("committed2: " + committed2);
        Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
        offsetMap.put(_topicPartition, committed2);
        committer.accept(consumer, offsetMap);
        OffsetAndMetadata committed3 = consumer.committed(_topicPartition);
        System.out.println("committed3: " + committed3);
        Assert.assertEquals(committed2.offset(), committed3.offset());
    }
}
 
Example 9
Source Project: kbear   Source File: ConsumerTest.java    License: Apache License 2.0
@Test
public void seekToEnd() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        produceMessages();

        System.out.println("\nseek\n");
        consumer.seekToEnd(_topicPartitions);

        System.out.println("\nsecond-poll\n");
        ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(30 * 1000));
        Assert.assertEquals(0, consumerRecords.count());
    }
}
 
Example 10
Source Project: alcor   Source File: MessageConsumerFactory.java    License: Apache License 2.0
public Consumer<Long, String> Create() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaAddress);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, IKafkaConfiguration.CONSUMER_GROUP_ID);

    // Key is set as long
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());

    Deserializer deserializer = getDeserializer();
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserializer.getClass().getName());

    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, IKafkaConfiguration.MAX_POLL_RECORDS);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, IKafkaConfiguration.OFFSET_RESET_EARLIER);

    Consumer<Long, String> consumer = new KafkaConsumer<>(props);
    return consumer;
}
 
Example 11
Source Project: kafka-pubsub-emulator   Source File: BaseIT.java    License: Apache License 2.0
/**
 * Creates a KafkaConsumer that is manually assigned to all partitions of the test topic indicated
 * by the {@code subscription}.
 */
protected Consumer<String, ByteBuffer> getValidationConsumer(String topic, String subscription) {
  Consumer<String, ByteBuffer> consumer =
      kafkaClientFactory.createConsumer(
          ProjectSubscriptionName.of(PROJECT, subscription).toString());

  Set<TopicPartition> topicPartitions =
      consumer
          .listTopics()
          .entrySet()
          .stream()
          .filter(e -> e.getKey().equals(ProjectTopicName.of(PROJECT, topic).toString()))
          .flatMap(
              e -> e.getValue().stream().map(p -> new TopicPartition(p.topic(), p.partition())))
          .collect(Collectors.toSet());
  consumer.assign(topicPartitions);

  return consumer;
}
 
Example 12
Source Project: garmadon   Source File: OffsetResetterTest.java    License: Apache License 2.0
@Test
public void partitionsAssignedCannotFetchOffset() throws IOException {
    final Consumer<Long, String> consumer = mock(Consumer.class);
    final PartitionedWriter successfulWriter = mock(PartitionedWriter.class);
    final PartitionedWriter exceptionalWriter = mock(PartitionedWriter.class);
    final OffsetResetter offsetResetter = new OffsetResetter<>(consumer, mock(java.util.function.Consumer.class),
            Arrays.asList(successfulWriter, exceptionalWriter));
    final TopicPartition partition = new TopicPartition(TOPIC, 1);
    final List<TopicPartition> partitions = Collections.singletonList(partition);

    when(successfulWriter.getStartingOffsets(any())).thenReturn(new HashMap<>());
    when(exceptionalWriter.getStartingOffsets(any())).thenThrow(new IOException("Ayo"));

    offsetResetter.onPartitionsAssigned(partitions);
    verify(consumer, times(1)).seekToBeginning(Collections.singleton(partition));
    verifyNoMoreInteractions(consumer);
}
 
Example 13
Source Project: uReplicator   Source File: TestUtils.java    License: Apache License 2.0
private static Consumer<byte[], byte[]> createConsumer(String bootstrapServer) {
  final Properties consumerProps = new Properties();
  consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG,
      "KafkaExampleConsumer");
  consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
      ByteArrayDeserializer.class.getName());
  consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
      ByteArrayDeserializer.class.getName());
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Create the consumer using props. Subscription to the topic happens in the
  // caller (see consumeMessage in Example 15).
  final Consumer<byte[], byte[]> consumer =
      new KafkaConsumer<>(consumerProps);
  return consumer;
}
 
Example 14
private Runnable getConsumerThread(Properties properties) {
    return () -> {
        Consumer<String, String> consumer = null;
        try {
            consumer = new KafkaConsumer<>(properties);
            consumer.subscribe(Collections.singletonList("test-topic"));
            while (!doneConsuming) {
                ConsumerRecords<String, String> records = consumer.poll(5000);
                for (ConsumerRecord<String, String> record : records) {
                    String message = String.format("Consumed: key = %s value = %s with offset = %d partition = %d",
                            record.key(), record.value(), record.offset(), record.partition());
                    System.out.println(message);
                }

            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null) {
                consumer.close();
            }
        }
    };
}
 
Example 15
Source Project: uReplicator   Source File: TestUtils.java    License: Apache License 2.0
public static List<ConsumerRecord<byte[], byte[]>> consumeMessage(String bootstrapServer,
    String topicName,
    int timeoutMs
) throws InterruptedException {

  long time = new Date().getTime();
  Consumer<byte[], byte[]> consumer = createConsumer(bootstrapServer);
  consumer.subscribe(Collections.singletonList(topicName));

  List<ConsumerRecord<byte[], byte[]>> result = new ArrayList<>();
  while ((new Date().getTime()) - time < timeoutMs) {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
    Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
    while (iterator.hasNext()) {
      result.add(iterator.next());
    }
    Thread.sleep(300);
  }
  consumer.close();
  return result;
}
 
Example 16
Source Project: datacollector   Source File: TestMapRDBCDCSource.java    License: Apache License 2.0
@Test
public void testMultiplePartitions() throws StageException, InterruptedException {
  MapRDBCDCBeanConfig conf = getConfig();
  conf.topicTableList = Collections.singletonMap("topic", "table");
  conf.numberOfThreads = 1;

  ConsumerRecords<byte[], ChangeDataRecord> consumerRecords1 = generateConsumerRecords(5, 1, "topic", 0, ChangeDataRecordType.RECORD_INSERT);
  ConsumerRecords<byte[], ChangeDataRecord> consumerRecords2 = generateConsumerRecords(5, 1, "topic", 1, ChangeDataRecordType.RECORD_INSERT);
  ConsumerRecords<byte[], ChangeDataRecord> emptyRecords = generateConsumerRecords(0, 1, "topic", 0, ChangeDataRecordType.RECORD_INSERT);

  Consumer mockConsumer = Mockito.mock(Consumer.class);
  List<Consumer> consumerList = Collections.singletonList(mockConsumer);
  Mockito
      .when(mockConsumer.poll(conf.batchWaitTime))
      .thenReturn(consumerRecords1)
      .thenReturn(consumerRecords2)
      .thenReturn(emptyRecords);

  MapRDBCDCSource source = createSource(conf, consumerList.iterator());
  PushSourceRunner sourceRunner = new PushSourceRunner.Builder(MapRDBCDCDSource.class, source)
      .addOutputLane("lane")
      .build();
  sourceRunner.runInit();

  MultiKafkaPushSourceTestCallback callback = new MultiKafkaPushSourceTestCallback(sourceRunner, 2);
  try {
    sourceRunner.runProduce(new HashMap<>(), 5, callback);
    int records = callback.waitForAllBatches();

    source.await();
    Assert.assertEquals(10, records);
    Assert.assertFalse(source.isRunning());
  } catch (Exception e) {
    Assert.fail(e.getMessage());
    throw e;
  } finally {
    sourceRunner.runDestroy();
  }
}
 
Example 17
@Override
public InputStream preview(int maxLength) {
  Properties props = new Properties();
  props.put("bootstrap.servers", brokerHost + ":" + brokerPort);
  props.put("group.id", MULTI_KAFKA_GROUP_NAME);
  props.put("enable.auto.commit", "false");
  props.put(KafkaConstants.AUTO_OFFSET_RESET_CONFIG, KafkaConstants.AUTO_OFFSET_RESET_PREVIEW_VALUE);
  props.put("key.deserializer", KAFKA_STRING_DESERIALIZER);
  props.put("value.deserializer", KAFKA_STRING_DESERIALIZER);
  ByteArrayOutputStream baos;

  try(Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
    baos = new ByteArrayOutputStream(maxLength);
    if(topic.isEmpty()) {
      baos.write(convertToRawListString(consumer.listTopics().keySet()).getBytes());
    } else {
      consumer.subscribe(Arrays.asList(topic));
      ConsumerRecords<String, String> records = consumer.poll(pollTimeout);
      if(records.isEmpty()) {
        baos.write("<no messages>".getBytes());
      } else {
        for (ConsumerRecord<String, String> record : records) {
          baos.write(record.value().getBytes());
        }
      }
    }

    baos.flush();
    baos.close();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }

  return new ByteArrayInputStream(baos.toByteArray());
}
 
Example 18
Source Project: garmadon   Source File: OffsetResetterTest.java    License: Apache License 2.0
@Test
public void partitionAssignedValidOffset() throws IOException {
    final Consumer<Long, String> consumer = mock(Consumer.class);
    final PartitionedWriter firstWriter = mock(PartitionedWriter.class);
    final PartitionedWriter secondWriter = mock(PartitionedWriter.class);
    final PartitionedWriter newEventWriter = mock(PartitionedWriter.class);
    final OffsetResetter offsetResetter = new OffsetResetter<>(consumer, mock(java.util.function.Consumer.class),
        Arrays.asList(firstWriter, secondWriter, newEventWriter));
    final TopicPartition firstPartition = new TopicPartition(TOPIC, 1);
    final TopicPartition secondPartition = new TopicPartition(TOPIC, 2);
    final List<TopicPartition> partitions = Arrays.asList(firstPartition, secondPartition);

    final Map<Integer, Long> firstOffsets = new HashMap<>();
    firstOffsets.put(firstPartition.partition(), 10L);
    firstOffsets.put(secondPartition.partition(), OffsetComputer.NO_OFFSET);
    when(firstWriter.getStartingOffsets(any())).thenReturn(firstOffsets);

    final Map<Integer, Long> secondOffsets = new HashMap<>();
    secondOffsets.put(firstPartition.partition(), 15L);
    secondOffsets.put(secondPartition.partition(), OffsetComputer.NO_OFFSET);
    when(secondWriter.getStartingOffsets(any())).thenReturn(secondOffsets);

    final Map<Integer, Long> thirdOffsets = new HashMap<>();
    thirdOffsets.put(firstPartition.partition(), OffsetComputer.NO_OFFSET);
    thirdOffsets.put(secondPartition.partition(), OffsetComputer.NO_OFFSET);
    when(newEventWriter.getStartingOffsets(any())).thenReturn(thirdOffsets);

    offsetResetter.onPartitionsAssigned(partitions);
    verify(consumer, times(1)).seek(eq(firstPartition), eq(10L));
    verify(consumer, times(1)).seekToBeginning(Collections.singleton(secondPartition));
    verifyNoMoreInteractions(consumer);
}
 
Example 19
Source Project: apicurio-registry   Source File: ConsumerContainer.java    License: Apache License 2.0
/**
 * Handles record(s) by passing them to {@code handler}, retrying until successful
 * (possibly ad infinitum).
 *
 * @param record the record or records to handle
 */
private <T> void acceptRetryable(T record, java.util.function.Consumer<? super T> handler, Consumer<K, V> consumer) {
    applyRetryable(record, r -> {
        handler.accept(r);
        return null;
    }, consumer);
}
 
Example 20
Source Project: java-kafka-client   Source File: TracingKafkaTest.java    License: Apache License 2.0
@Test
public void testConsumerBuilderWithStandardSpanNameProvider() throws InterruptedException {
  Producer<Integer, String> producer = createTracingProducer();
  producer.send(new ProducerRecord<>("messages", 1, "test"));
  producer.close();

  assertEquals(1, mockTracer.finishedSpans().size());

  ExecutorService executorService = Executors.newSingleThreadExecutor();
  final CountDownLatch latch = new CountDownLatch(1);

  executorService.execute(() -> {
    Consumer<Integer, String> consumer = createConsumerWithSpanNameProvider(null);

    while (latch.getCount() > 0) {
      ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(100));
      for (ConsumerRecord<Integer, String> record : records) {
        SpanContext spanContext = TracingKafkaUtils
            .extractSpanContext(record.headers(), mockTracer);
        assertNotNull(spanContext);
        assertEquals("test", record.value());
        assertEquals((Integer) 1, record.key());

        consumer.commitSync();
        latch.countDown();
      }
    }
    consumer.close();
  });

  assertTrue(latch.await(30, TimeUnit.SECONDS));

  assertEquals("From_messages", mockTracer.finishedSpans().get(1).operationName());
}
 
Example 21
Source Project: kafka-helmsman   Source File: FreshnessTrackerTest.java    License: MIT License
/**
 * This is a somewhat contrived test case. The first committed offset should be the offset at (0), but this
 * checks the case where the consumer is not up-to-date yet somehow got something committed. It's strange,
 * but it has been seen to happen with old consumers whose offsets have fallen off the stream, or in other
 * odd cases arising from the asynchronous interactions between Burrow and Kafka.
 */
@Test
public void testSeekToBeginningForFirstOffset() throws Exception {
  Clock clock = Clock.fixed(Instant.now(), Clock.systemDefaultZone().getZone());
  long ts = clock.millis() - 1;
  ConsumerRecords recordsOneMilliBefore = createConsumerRecordsAtTimestamps(ts);
  Consumer kafka = mock(Consumer.class);
  when(kafka.poll(Duration.ofSeconds(2))).thenReturn(recordsOneMilliBefore);

  // this consumer is at the first position (offset 0), so we expect to go back one offset
  FreshnessTracker.ConsumerOffset consumer =
      new FreshnessTracker.ConsumerOffset("cluster", "group", "topic", 1, 0, false);
  FreshnessMetrics metrics = new FreshnessMetrics();
  FreshnessTracker work = new FreshnessTracker(consumer, kafka, metrics);
  work.setClockForTesting(clock);
  work.run();

  InOrder ordered = inOrder(kafka);
  // seeking the committed offset, but go back 1 to actual message
  ordered.verify(kafka).seekToBeginning(Collections.singletonList(new TopicPartition("topic", 1)));
  ordered.verify(kafka).poll(Duration.ofSeconds(2));
  ordered.verifyNoMoreInteractions();

  double noDelta = 0;
  assertEquals("Freshness should be zero for no lag",
      1, metrics.freshness.labels("cluster", "group", "topic", "1").get(), noDelta);
  assertEquals(0, metrics.failed.labels("cluster", "group").get(), 0);
}
 
Example 22
Source Project: kbear   Source File: ConsumerProxy.java    License: Apache License 2.0
protected ConsumerHolder newConsumerHolder(String topicId) {
    ConsumerGroupId consumerGroupId = new ConsumerGroupId(_groupName, topicId);
    _metaManager.registerConsumer(consumerGroupId, () -> _executorService.submit(() -> restartConsumer(topicId)));
    KafkaMetaHolder metaHolder = _metaManager.getMetaHolder();
    Route route = metaHolder.getConsumerGroupRoutes().get(consumerGroupId);
    if (route == null)
        throw new IllegalArgumentException("ConsumerGroup not found: " + consumerGroupId);
    ConsumerGroup consumerGroup = metaHolder.getConsumerGroups().get(consumerGroupId);
    Cluster cluster = metaHolder.getClusters().get(route.getClusterId());
    Topic topic = metaHolder.getTopics().get(route.getTopicId());
    KafkaConsumerConfig kafkaConsumerConfig = constructConsumerConfig(_kafkaConsumerConfig, consumerGroup, topic,
            cluster);
    Consumer consumer = newConsumer(consumerGroupId, kafkaConsumerConfig);
    return new ConsumerHolder<>(consumerGroupId, kafkaConsumerConfig, route, consumer);
}
 
Example 23
Source Project: flink   Source File: KafkaConsumerThreadTest.java    License: Apache License 2.0
@Test(timeout = 10000)
public void testCloseWithoutAssignedPartitions() throws Exception {
	// no initial assignment
	final Consumer<byte[], byte[]> mockConsumer = createMockConsumer(
		new LinkedHashMap<TopicPartition, Long>(),
		Collections.<TopicPartition, Long>emptyMap(),
		false,
		null,
		null);

	// setup latch so the test waits until testThread is blocked on getBatchBlocking method
	final MultiShotLatch getBatchBlockingInvoked = new MultiShotLatch();
	final ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>> unassignedPartitionsQueue =
		new ClosableBlockingQueue<KafkaTopicPartitionState<Object, TopicPartition>>() {
			@Override
			public List<KafkaTopicPartitionState<Object, TopicPartition>> getBatchBlocking() throws InterruptedException {
				getBatchBlockingInvoked.trigger();
				return super.getBatchBlocking();
			}
		};

	final TestKafkaConsumerThread testThread =
		new TestKafkaConsumerThread(mockConsumer, unassignedPartitionsQueue, new Handover());

	testThread.start();
	getBatchBlockingInvoked.await();
	testThread.shutdown();
	testThread.join();
}
 
Example 24
@Override
public Consumer<byte[], byte[]> getConsumer(Map<String, Object> config) {
  return new TracingKafkaConsumerBuilder<>(
      new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()),
      tracer).withDecorators(spanDecorators).withSpanNameProvider(consumerSpanNameProvider)
      .build();
}
 
Example 25
Source Project: synapse   Source File: KafkaMessageSenderTest.java    License: Apache License 2.0
@Test
public void shouldSendBatch() throws Exception {
    // given
    final ExampleJsonObject appleObject = new ExampleJsonObject("apple");
    final ExampleJsonObject bananaObject = new ExampleJsonObject("banana");

    try (final Consumer<String, String> consumer = getKafkaConsumer("someTestGroup")) {
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KAFKA_TOPIC);

        // when
        messageSender.sendBatch(Stream.of(
                message("a", appleObject),
                message("b", bananaObject)
        ));

        // then
        final ConsumerRecords<String, String> records = getRecords(consumer, 250L, 2);
        assertThat(records.count(), is(2));

        final ConsumerRecord<String, String> first = getFirst(records.records(KAFKA_TOPIC), null);
        assertThat(first.key(), is("a"));
        assertThat(first.value(), is("{\"value\":\"apple\"}"));

        final ConsumerRecord<String, String> second = getLast(records.records(KAFKA_TOPIC), null);
        assertThat(second.key(), is("b"));
        assertThat(second.value(), is("{\"value\":\"banana\"}"));
    }
}
 
Example 26
private Consumer<?, ?> createConsumer(Properties consumerProps, String bootstrapServers, String groupId) {
  Properties properties = new Properties();
  properties.putAll(consumerProps);
  properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  properties.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
      ByteArrayDeserializer.class.getCanonicalName());
  properties.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
      ByteArrayDeserializer.class.getCanonicalName());
  properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, CONSUMER_AUTO_OFFSET_RESET_CONFIG_LATEST);
  return _listenerConsumerFactory.createConsumer(properties);
}
 
Example 27
Source Project: kbear   Source File: ConsumerTest.java    License: Apache License 2.0
@Test
public void assignment() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        Set<TopicPartition> assignments = consumer.assignment();
        System.out.println("assignments: " + assignments);
        Assert.assertEquals(_topicPartitions, assignments);
    }
}
 
Example 28
Source Project: kbear   Source File: ConsumerTest.java    License: Apache License 2.0
@Test
public void assign() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.assign(_topicPartitions);
        pollDurationTimeout(consumer);

        Set<TopicPartition> assignments = consumer.assignment();
        System.out.println("assignments: " + assignments);
        Assert.assertEquals(_topicPartitions, assignments);
    }
}
 
Example 29
Source Project: rya   Source File: DeleteQueryCommandIT.java    License: Apache License 2.0
@Before
public void setup() {
    // Make sure the topic that the change log uses exists.
    final String changeLogTopic = KafkaTopics.queryChangeLogTopic(ryaInstance);
    System.out.println("Test Change Log Topic: " + changeLogTopic);
    kafka.createTopic(changeLogTopic);

    // Setup the QueryRepository used by the test.
    final Producer<?, QueryChange> queryProducer = KafkaTestUtil.makeProducer(kafka, StringSerializer.class, QueryChangeSerializer.class);
    final Consumer<?, QueryChange> queryConsumer = KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, QueryChangeDeserializer.class);
    final QueryChangeLog changeLog = new KafkaQueryChangeLog(queryProducer, queryConsumer, changeLogTopic);
    queryRepo = new InMemoryQueryRepository(changeLog, Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS));
}
 
Example 30
Source Project: kbear   Source File: ConsumerTest.java    License: Apache License 2.0
@Test
public void pollLongTimeout() throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollLongTimeout(consumer);
    }
}