org.apache.kafka.clients.consumer.ConsumerRecords Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords. Each example is taken from an open-source project; where known, the source project, author, file, and license are listed above the code.
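
Before the project examples, here is a minimal, self-contained poll loop showing how a ConsumerRecords batch is obtained and iterated with the current poll(Duration) API. This is a sketch only; the broker address, group id, and topic name are placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerRecordsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "example-group");           // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            while (true) {
                // One poll returns a single ConsumerRecords batch covering all assigned partitions.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}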
Example #1
Source Project: kafka-serializer-example   Author: nielsutrecht   File: Example.java    License: MIT License
public static void runConsumer(Properties properties, String topic) throws Exception {
    properties.put("group.id", "test");
    properties.put("enable.auto.commit", "true");
    properties.put("auto.commit.interval.ms", "1000");
    properties.put("session.timeout.ms", "30000");
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    System.out.printf("Running consumer with serializer %s on topic %s\n", properties.getProperty("value.deserializer"), topic);

    KafkaConsumer<String, SensorReading> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topic));
    while (true) {
        ConsumerRecords<String, SensorReading> records = consumer.poll(100);
        for (ConsumerRecord<String, SensorReading> record : records)
            System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
    }
}
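
The call consumer.poll(100) above uses the poll(long) overload, which was deprecated in Kafka clients 2.0 (KIP-266) in favor of poll(Duration). On newer clients the equivalent line would be:

    ConsumerRecords<String, SensorReading> records = consumer.poll(Duration.ofMillis(100));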
 
Example #2
Source Project: localization_nifi   Author: wangrenlei   File: ConsumerPoolTest.java    License: Apache License 2.0
@Test
public void validatePoolBatchCreatePollClose() throws Exception {
    final byte[][] firstPassValues = new byte[][]{
        "Hello-1".getBytes(StandardCharsets.UTF_8),
        "Hello-2".getBytes(StandardCharsets.UTF_8),
        "Hello-3".getBytes(StandardCharsets.UTF_8)
    };
    final ConsumerRecords<byte[], byte[]> firstRecs = createConsumerRecords("foo", 1, 1L, firstPassValues);

    when(consumer.poll(anyLong())).thenReturn(firstRecs, createConsumerRecords("nifi", 0, 0L, new byte[][]{}));
    try (final ConsumerLease lease = testDemarcatedPool.obtainConsumer(mockSession)) {
        lease.poll();
        lease.commit();
    }
    testDemarcatedPool.close();
    verify(mockSession, times(1)).create();
    verify(mockSession, times(1)).commit();
    final PoolStats stats = testDemarcatedPool.getPoolStats();
    assertEquals(1, stats.consumerCreatedCount);
    assertEquals(1, stats.consumerClosedCount);
    assertEquals(1, stats.leasesObtainedCount);
}
 
Example #3
private void sendTombStoneRecordsAndVerifyGracefulHandling() throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
			senderProps);
	try {
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("words-1");
		template.sendDefault(null);
		ConsumerRecords<String, String> received = consumer
				.poll(Duration.ofMillis(5000));
		// By asserting that the received records are empty, we ensure that the
		// tombstone record was handled gracefully by the binder.
		assertThat(received.isEmpty()).isTrue();
	}
	finally {
		pf.destroy();
	}
}
 
Example #4
Source Project: nifi   Author: apache   File: ConsumerLease.java    License: Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note:
     * Even if ConsumeKafka is not scheduled to poll (for example, because downstream
     * connection back-pressure is engaged) for longer than session.timeout.ms
     * (default 10 sec), the Kafka consumer sends heartbeats from a background thread.
     * If that situation lasts longer than max.poll.interval.ms (default 5 min), the
     * consumer sends a LeaveGroup request to the Group Coordinator. When the
     * ConsumeKafka processor is scheduled again, the Kafka client checks whether this
     * instance is still part of the consumer group; if not, it rejoins before polling
     * messages. This background-heartbeat behavior was introduced by KIP-62 and is
     * available from Kafka client 0.10.1.0 onwards.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final ProcessException pe) {
        throw pe;
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
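
The two timeouts mentioned in the implementation note are ordinary consumer configs. A minimal sketch of setting them explicitly (the values shown are simply the documented defaults):

    Properties props = new Properties();
    // Heartbeats are sent from a background thread; the group coordinator evicts
    // the member if none arrive within session.timeout.ms.
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
    // Maximum delay between poll() calls before the consumer leaves the group.
    props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");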
 
Example #5
Source Project: pinpoint   Author: naver   File: ConsumerPollInterceptor.java    License: Apache License 2.0
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (isDebug) {
        logger.afterInterceptor(target, args, result, throwable);
    }

    if (!(target instanceof RemoteAddressFieldAccessor)) {
        return;
    }

    String remoteAddress = ((RemoteAddressFieldAccessor) target)._$PINPOINT$_getRemoteAddress();
    if (StringUtils.isEmpty(remoteAddress)) {
        remoteAddress = KafkaConstants.UNKNOWN;
    }

    if (result instanceof ConsumerRecords) {
        Iterator consumerRecordIterator = ((ConsumerRecords) result).iterator();
        while (consumerRecordIterator.hasNext()) {
            Object consumerRecord = consumerRecordIterator.next();
            if (consumerRecord instanceof RemoteAddressFieldAccessor) {
                ((RemoteAddressFieldAccessor) consumerRecord)._$PINPOINT$_setRemoteAddress(remoteAddress);
            }
        }
    }
}
 
Example #6
Source Project: SkaETL   Author: skalogs   File: KafkaUnit.java    License: Apache License 2.0
public <K, V> List<Message<K, V>> readMessages(final String topicName, final int maxPoll, final MessageExtractor<K, V> messageExtractor) {
    final Properties props = new Properties();
    props.put("bootstrap.servers", brokerString);
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", ByteArrayDeserializer.class.getName());
    props.put("value.deserializer", ByteArrayDeserializer.class.getName());
    props.put("max.poll.records", String.valueOf(maxPoll));
    try (final KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props)) {
        kafkaConsumer.subscribe(Collections.singletonList(topicName));
        kafkaConsumer.poll(0); // dummy poll
        kafkaConsumer.seekToBeginning(Collections.singletonList(new TopicPartition(topicName, 0)));
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10000);
        final List<Message<K, V>> messages = new ArrayList<>();
        for (ConsumerRecord<byte[], byte[]> record : records) {
            messages.add(messageExtractor.extract(record));
        }
        return messages;
    }
}
 
Example #7
private void testExceptionProcessingByFunction(String topic, LiKafkaConsumer<byte[], byte[]> consumer,
    BiConsumer<LiKafkaConsumer<byte[], byte[]>, TopicPartition> testFunction) throws Exception {
  try {
    consumer.subscribe(Collections.singleton(topic));
    ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
    while (records.isEmpty()) {
      records = consumer.poll(Duration.ofMillis(10));
    }
    assertEquals(records.count(), 4, "Four records should be returned");
    assertEquals(records.iterator().next().offset(), 2L, "The offset of the first message should be 2.");
    assertEquals(consumer.position(new TopicPartition(topic, 0)), 7L, "The position should be 7");

    testFunction.accept(consumer, new TopicPartition(topic, 0));
  } finally {
    consumer.close();
  }
}
 
Example #8
Source Project: apm-agent-java   Author: elastic   File: KafkaLegacyClientIT.java    License: Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getSpans().size() == 2);
    }
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
 
Example #9
Source Project: pulsar-java-tutorial   Author: streamlio   File: KafkaAdaptorConsumer.java    License: Apache License 2.0
@Override
public void run() {
    try {
        consumer.subscribe(topics);

        log.info("Consumer successfully subscribed to topics {}", topics);

        ConsumerRecords<Integer, String> records = consumer.poll(Long.MAX_VALUE);
        records.forEach(record -> {
            log.info("Received record with a key of {} and a value of {}", record.key(), record.value());
        });
    } catch (WakeupException e) {
        // Ignore
    } finally {
        consumer.commitSync();
        log.info("Consumer for topics {} temporarily closed", topics);
        this.run();
    }
}
 
Example #10
Source Project: canal   Author: alibaba   File: KafkaCanalConnector.java    License: Apache License 2.0
@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}
 
Example #11
Source Project: DBus   Author: BriData   File: CheckFlowLineHandler.java    License: Apache License 2.0
private void secondStep(BufferedWriter bw, KafkaConsumer<String, byte[]> consumer, long offset) throws Exception {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 15) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumer.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                if (record.offset() >= offset) {
                    isOk = true;
                    bw.write("data arrive at topic: " + record.topic());
                    bw.newLine();
                    break;
                }
            }
        }
    } catch (Exception e) {
        bw.write("auto check table second step error: " + e.getMessage());
        bw.newLine();
        throw new RuntimeException("auto check table second step error", e);
    }

    if (!isOk) {
        bw.write("flow line second step time out");
        throw new RuntimeException("flow line second step time out");
    }
}
 
Example #12
Source Project: Microservices-Deployment-Cookbook   Author: PacktPublishing   File: GeoLocationConsumer.java    License: MIT License
public void run() {
	Properties props = new Properties();
	props.put("bootstrap.servers", "192.168.99.100:9092");
	props.put("group.id", "geolocationConsumer");
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());

	try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
		consumer.subscribe(Arrays.asList("geolocations"));
		while (true) {
			ConsumerRecords<String, String> records = consumer.poll(100);
			for (ConsumerRecord<String, String> record : records) {
				System.out.printf("offset = %d, key = %s, value = %s%n", 
						record.offset(), 
						record.key(), 
						record.value());

				REPO.addGeoLocation(GSON.fromJson(record.value(), GeoLocation.class));
			}
		}
	} catch (Exception e) {
		System.err.println("Error while consuming geolocations. Details: " + e.getMessage());
	}
}
 
Example #13
Source Project: BigData-In-Practice   Author: whirlys   File: OffsetCommitSyncBatch.java    License: Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.out.println(record.offset() + " : " + record.value());
        }
        if (buffer.size() >= minBatchSize) {
            //do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
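
The argument-less commitSync() above commits the latest offsets returned by poll() for all assigned partitions. The consumer also accepts explicit per-partition offsets; a short sketch, where consumer and record are the names from the loop above and tp is a local introduced here for illustration (assumes the usual org.apache.kafka.clients.consumer and java.util imports):

    // The committed offset is the position of the next record to consume,
    // hence record.offset() + 1.
    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(record.offset() + 1)));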
 
Example #14
Source Project: BigData-In-Practice   Author: whirlys   File: ProtostuffDeserializer.java    License: Apache License 2.0
public static void main(String[] args) {
    String brokerList = "192.168.0.101:9092";
    String topic = "topic.serialization";
    String groupId = "group.demo";
    Properties properties = new Properties();
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ProtostuffDeserializer.class.getName());
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

    KafkaConsumer<String, Company> consumer = new KafkaConsumer<String, Company>(properties);
    consumer.subscribe(Collections.singletonList(topic));

    while (true) {
        ConsumerRecords<String, Company> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, Company> record : records) {
            System.out.println(String.format("%s-%s-%s-%s",
                    record.topic(), record.partition(), record.offset(), record.value()));
            // Successful deserialization prints, e.g.: topic.serialization-0-1-Company(name=whirly, address=中国)
        }
        }
    }
}
 
Example #15
Source Project: uReplicator   Author: uber   File: TestUtils.java    License: Apache License 2.0
public static List<ConsumerRecord<Byte[], Byte[]>> consumeMessage(String bootstrapServer,
    String topicName,
    int timeoutMs
) throws InterruptedException {

  long time = new Date().getTime();
  Consumer<Byte[], Byte[]> consumer = createConsumer(bootstrapServer);
  consumer.subscribe(Collections.singletonList(topicName));

  List<ConsumerRecord<Byte[], Byte[]>> result = new ArrayList<>();
  while ((new Date().getTime()) - time < timeoutMs) {
    ConsumerRecords<Byte[], Byte[]> records = consumer.poll(1000);
    Iterator<ConsumerRecord<Byte[], Byte[]>> iterator = records.iterator();
    while (iterator.hasNext()) {
      result.add(iterator.next());
    }
    Thread.sleep(300);
  }
  consumer.close();
  return result;
}
 
Example #16
Source Project: kafka_book_demo   Author: hiddenzzh   File: ConsumerInterceptorTTL.java    License: Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    System.out.println("before:" + records);
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            if (now - record.timestamp() < EXPIRE_INTERVAL) {
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
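
An interceptor like this only takes effect once it is registered on the consumer. A minimal sketch of wiring it in through the standard interceptor.classes config:

    Properties props = new Properties();
    props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptorTTL.class.getName());
    // Every batch returned by poll() now passes through onConsume() before the caller sees it.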
 
Example #17
Source Project: kafka_book_demo   Author: hiddenzzh   File: KafkaConsumerAnalysis.java    License: Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    try {
        while (isRunning.get()) {
            ConsumerRecords<String, String> records =
                    consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic = " + record.topic()
                        + ", partition = " + record.partition()
                        + ", offset = " + record.offset());
                System.out.println("key = " + record.key()
                        + ", value = " + record.value());
                //do something to process record.
            }
        }
    } catch (Exception e) {
        log.error("occur exception ", e);
    } finally {
        consumer.close();
    }
}
 
Example #18
Source Project: synapse   Author: otto-de   File: KafkaRecordsConsumerTest.java    License: Apache License 2.0
@Test
public void shouldUpdateDurationBehindHandler() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());

    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    final ConsumerRecords<String,String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0),
            singletonList(record))
    );
    consumer.apply(records);

    // then
    final long secondsBehind = getSecondsBehind("0");
    assertThat(secondsBehind, is(lessThanOrEqualTo(2L)));
}
 
Example #19
Source Project: synapse   Author: otto-de   File: KafkaRecordsConsumerTest.java    License: Apache License 2.0
@Test
public void shouldDispatchMessage() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());

    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    final ConsumerRecords<String,String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0),
            singletonList(record))
    );
    consumer.apply(records);

    // then
    verify(dispatcher).accept(of(Key.of("key"), of(fromPosition("0", "42")), "payload"));
}
 
Example #20
Source Project: apm-agent-java   Author: elastic   File: KafkaLegacyBrokerIT.java    License: Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getTransactions().size() == 2);
        int expectedSpans = (testScenario == TestScenario.NO_CONTEXT_PROPAGATION) ? 2 : 4;
        await().atMost(500, MILLISECONDS).until(() -> reporter.getSpans().size() == expectedSpans);
    }
    //noinspection deprecation - this poll overload is deprecated in newer clients, but enables testing of old ones
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
 
Example #21
Source Project: kafka-webview   Author: SourceLabOrg   File: RecordFilterInterceptorTest.java    License: MIT License
private ConsumerRecords createConsumerRecords(final int count) {
    final String topic = "MyTopic";
    final int partition = 0;

    final Map<TopicPartition, List<ConsumerRecord>> recordsMap = new HashMap<>();
    final TopicPartition topicPartition = new TopicPartition(topic, partition);
    final List<ConsumerRecord> consumerRecords = new ArrayList<>();

    for (int x = 0; x < count; x++) {
        consumerRecords.add(
            new ConsumerRecord<Object, Object>(topic, partition, x, "Key" + x, "Value" + x)
        );
    }
    recordsMap.put(topicPartition, consumerRecords);

    return new ConsumerRecords(recordsMap);
}
 
Example #22
Source Project: flink   Author: apache   File: Handover.java    License: Apache License 2.0
/**
 * Polls the next element from the Handover, possibly blocking until the next element is
 * available. This method behaves similar to polling from a blocking queue.
 *
 * <p>If an exception was handed in by the producer ({@link #reportError(Throwable)}), then
 * that exception is thrown rather than an element being returned.
 *
 * @return The next element (buffer of records, never null).
 *
 * @throws ClosedException Thrown if the Handover was {@link #close() closed}.
 * @throws Exception Rethrows exceptions from the {@link #reportError(Throwable)} method.
 */
@Nonnull
public ConsumerRecords<byte[], byte[]> pollNext() throws Exception {
	synchronized (lock) {
		while (next == null && error == null) {
			lock.wait();
		}

		ConsumerRecords<byte[], byte[]> n = next;
		if (n != null) {
			next = null;
			lock.notifyAll();
			return n;
		}
		else {
			ExceptionUtils.rethrowException(error, error.getMessage());

			// this statement cannot be reached since the above method always throws an exception
			// this is only here to silence the compiler and any warnings
			return ConsumerRecords.empty();
		}
	}
}
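
A fetcher thread would typically loop on pollNext() until the Handover is closed. A minimal usage sketch, where running and process(...) stand in for surrounding code and are not part of the Flink API:

	while (running) {
		// Blocks until the producer hands over the next batch or reports an error.
		ConsumerRecords<byte[], byte[]> records = handover.pollNext();
		process(records); // hypothetical downstream processing
	}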
 
Example #23
Source Project: javabase   Author: ggj2010   File: AtMostOnceConsumer.java    License: Apache License 2.0
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("%noffset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }

        System.out.println("lastOffset read: " + lastOffset);

        process();
    }
}
 
Example #24
Source Project: joyqueue   Author: chubaostream   File: SimpleKafkaConsumer.java    License: Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfigs.BOOTSTRAP);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("test_topic_0"));

    while (true) {
//        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000 * 1));
        ConsumerRecords<String, String> records = consumer.poll(1000 * 1);
        for (ConsumerRecord<String, String> record : records) {
//            System.out.println(String.format("record, key: %s, value: %s, offset: %s, timestamp: %s", record.key(), record.value(), record.offset(), record.timestamp()));
            System.out.println(String.format("record, key: %s, value: %s, offset: %s", record.key(), record.value(), record.offset()));
        }
    }
}
 
Example #25
Source Project: javabase   Author: ggj2010   File: KafkaConsumerAPITest.java    License: Apache License 2.0
/**
 * Subscribe to the topic.
 * @param consumer
 */
private static void subscribe(KafkaConsumer<Integer, String> consumer) {
    consumer.subscribe(Arrays.asList(KafKaProducerAPITest.TOPIC_API));
    while (true) {
        System.out.println("==start get value beign===");
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        System.out.println("==start get value end===");
        for (ConsumerRecord<Integer, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
    }
}
 
Example #26
Source Project: micronaut-kafka   Author: micronaut-projects   File: BatchConsumerRecordsBinderRegistry.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public <T> Optional<ArgumentBinder<T, ConsumerRecords<?, ?>>> findArgumentBinder(Argument<T> argument, ConsumerRecords<?, ?> source) {
    Class<T> argType = argument.getType();
    if (Iterable.class.isAssignableFrom(argType) || argType.isArray() || Publishers.isConvertibleToPublisher(argType)) {
        Argument<?> batchType = argument.getFirstTypeVariable().orElse(Argument.OBJECT_ARGUMENT);
        List<Object> bound = new ArrayList<>();

        return Optional.of((context, consumerRecords) -> {
            for (ConsumerRecord<?, ?> consumerRecord : consumerRecords) {
                Optional<ArgumentBinder<?, ConsumerRecord<?, ?>>> binder = consumerRecordBinderRegistry.findArgumentBinder((Argument) argument, consumerRecord);
                binder.ifPresent(b -> {
                    Argument<?> newArg = Argument.of(batchType.getType(), argument.getName(), argument.getAnnotationMetadata(), batchType.getTypeParameters());
                    ArgumentConversionContext conversionContext = ConversionContext.of(newArg);
                    ArgumentBinder.BindingResult<?> result = b.bind(
                            conversionContext,
                            consumerRecord);
                    if (result.isPresentAndSatisfied()) {
                        bound.add(result.get());
                    }

                });
            }
            return () -> {
                if (Publisher.class.isAssignableFrom(argument.getType())) {
                    return ConversionService.SHARED.convert(Flowable.fromIterable(bound), argument);
                } else {
                    return ConversionService.SHARED.convert(bound, argument);
                }
            };
        });
    }
    return Optional.empty();
}
 
Example #27
Source Project: apicurio-registry   Author: Apicurio   File: KafkaClients.java    License: Apache License 2.0
private static CompletableFuture<Integer> consumeMessages(String topicName, int messageCount, String keyDeserializer, String valueDeserializer) {
    CompletableFuture<Integer> resultPromise = CompletableFuture.supplyAsync(() -> {
        final Consumer<Long, GenericRecord> consumer = (Consumer<Long, GenericRecord>) KafkaClients.createConsumer(
                keyDeserializer, valueDeserializer, topicName);
        consumer.subscribe(Collections.singletonList(topicName));

        AtomicInteger consumedMessages = new AtomicInteger();

        try {
            while (consumedMessages.get() < messageCount) {

                final ConsumerRecords<Long, GenericRecord> records = consumer.poll(Duration.ofSeconds(1));
                if (records.count() == 0) {
                    LOGGER.info("None found");
                } else records.forEach(record -> {
                    consumedMessages.getAndIncrement();
                    LOGGER.info("{} {} {} {}", record.topic(),
                            record.partition(), record.offset(), record.value());
                });
            }

            LOGGER.info("Consumed {} messages", consumedMessages.get());
        } finally {
            consumer.close();
        }

        return consumedMessages.get();
    });

    try {
        resultPromise.get(30, TimeUnit.SECONDS);
    } catch (Exception e) {
        resultPromise.completeExceptionally(e);
    }

    return resultPromise;
}
 
Example #28
Source Project: light-eventuate-4j   Author: networknt   File: AbstractCdcTest.java    License: Apache License 2.0
public void waitForEventInKafka(KafkaConsumer<String, String> consumer, String entityId, LocalDateTime deadline) throws InterruptedException {
  while (LocalDateTime.now().isBefore(deadline)) {
    long millis = ChronoUnit.MILLIS.between(LocalDateTime.now(), deadline);
    ConsumerRecords<String, String> records = consumer.poll(millis);
    if (!records.isEmpty()) {
      for (ConsumerRecord<String, String> record : records) {
        if (record.key().equals(entityId)) {
          return;
        }
      }
    }
  }
  throw new RuntimeException("entity not found: " + entityId);
}
 
Example #29
Source Project: spring-cloud-stream-samples   Author: spring-cloud   File: ToUpperCaseProcessorIntTests.java    License: Apache License 2.0
@Test
void testMessagesOverKafka() {
	this.template.send(TEST_TOPIC_IN, "test".getBytes());

	Consumer<byte[], String> consumer = this.consumerFactory.createConsumer();

	embeddedKafkaBroker.consumeFromAnEmbeddedTopic(consumer, TEST_TOPIC_OUT);

	ConsumerRecords<byte[], String> replies = KafkaTestUtils.getRecords(consumer);
	assertThat(replies.count()).isEqualTo(1);

	Iterator<ConsumerRecord<byte[], String>> iterator = replies.iterator();
	assertThat(iterator.next().value()).isEqualTo("TEST");
}
 
Example #30
Source Project: mirus   Author: salesforce   File: MirusSourceTaskTest.java    License: BSD 3-Clause "New" or "Revised" License
@Test
public void testSourceRecordsWorksWithHeaders() {
  final String topic = "topica";
  final int partition = 0;
  final int offset = 123;
  final long timestamp = 314159;

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
  Headers headers = new RecordHeaders();
  headers.add("h1", "v1".getBytes(StandardCharsets.UTF_8));
  headers.add("h2", "v2".getBytes(StandardCharsets.UTF_8));
  records.put(
      new TopicPartition(topic, partition),
      Collections.singletonList(newConsumerRecord(topic, partition, offset, timestamp, headers)));
  ConsumerRecords<byte[], byte[]> pollResult = new ConsumerRecords<>(records);

  List<SourceRecord> result = mirusSourceTask.sourceRecords(pollResult);

  assertThat(
      StreamSupport.stream(result.get(0).headers().spliterator(), false)
          .map(Header::key)
          .collect(Collectors.toList()),
      hasItems("h1", "h2"));
  assertThat(
      StreamSupport.stream(result.get(0).headers().spliterator(), false)
          .map(Header::value)
          .collect(Collectors.toList()),
      hasItems("v1".getBytes(StandardCharsets.UTF_8), "v2".getBytes(StandardCharsets.UTF_8)));
}