org.apache.kafka.clients.consumer.ConsumerRecords Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords. They are drawn from open-source projects; the originating project, source file, and license are noted above each example.
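As a quick orientation before the project samples, the sketch below shows the two usual ways to read a ConsumerRecords batch: iterating every record, and drilling in partition by partition via partitions() and records(TopicPartition). The broker address, group id, and topic name are placeholders.

public static void pollOnce() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Collections.singletonList("example-topic"));  // placeholder topic
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

        // Iterate every record in the batch, regardless of partition.
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }

        // Or access the batch partition by partition.
        for (TopicPartition tp : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
            System.out.printf("partition %d returned %d records%n",
                    tp.partition(), partitionRecords.size());
        }
    }
}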
Example #1
Source File: Example.java    From kafka-serializer-example with MIT License
public static void runConsumer(Properties properties, String topic) throws Exception {
    properties.put("group.id", "test");
    properties.put("enable.auto.commit", "true");
    properties.put("auto.commit.interval.ms", "1000");
    properties.put("session.timeout.ms", "30000");
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    System.out.printf("Running consumer with serializer %s on topic %s\n", properties.getProperty("value.deserializer"), topic);

    KafkaConsumer<String, SensorReading> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topic));
    while (true) {
        ConsumerRecords<String, SensorReading> records = consumer.poll(100);
        for (ConsumerRecord<String, SensorReading> record : records)
            System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
    }
}
 
Example #2
Source File: AtMostOnceConsumer.java    From javabase with Apache License 2.0
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        process();
    }
}
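A note on the at-most-once semantics above: they rely on auto-commit firing before process() finishes. An equivalent but more explicit sketch, assuming the same subscribed KafkaConsumer<String, String>, commits synchronously right after the poll and before any processing, so records that fail mid-processing are never redelivered:

private static void processRecordsAtMostOnce(KafkaConsumer<String, String> consumer) {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        if (records.isEmpty()) {
            continue;
        }
        // Commit first: a crash below loses these records rather than reprocessing them.
        consumer.commitSync();
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }
    }
}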
 
Example #3
Source File: KafkaStreamsBinderWordCountIntegrationTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private void sendTombStoneRecordsAndVerifyGracefulHandling() throws Exception {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
			senderProps);
	try {
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("words-1");
		template.sendDefault(null);
		ConsumerRecords<String, String> received = consumer
				.poll(Duration.ofMillis(5000));
		// By asserting that the received record is empty, we are ensuring that the
		// tombstone record was handled by the binder gracefully.
		assertThat(received.isEmpty()).isTrue();
	}
	finally {
		pf.destroy();
	}
}
 
Example #4
Source File: KafkaConsumerAnalysis.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    try {
        while (isRunning.get()) {
            ConsumerRecords<String, String> records =
                    consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic = " + record.topic()
                        + ", partition = " + record.partition()
                        + ", offset = " + record.offset());
                System.out.println("key = " + record.key()
                        + ", value = " + record.value());
                //do something to process record.
            }
        }
    } catch (Exception e) {
        log.error("occur exception ", e);
    } finally {
        consumer.close();
    }
}
 
Example #5
Source File: ConsumerLease.java    From nifi with Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note:
     * Even if ConsumeKafka is not scheduled to poll for longer than session.timeout.ms (defaults to 10 sec),
     * e.g. because downstream connection back-pressure is engaged, the Kafka consumer sends heartbeats from a
     * background thread. If that situation lasts longer than max.poll.interval.ms (defaults to 5 min), the
     * Kafka consumer sends a Leave Group request to the Group Coordinator. When the ConsumeKafka processor is
     * scheduled again, the Kafka client checks whether this client instance is still part of the consumer
     * group; if not, it rejoins before polling messages.
     * This behavior was introduced by Kafka KIP-62 and is available from Kafka client 0.10.1.0 onwards.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final ProcessException pe) {
        throw pe;
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
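The timeouts in the implementation note are ordinary consumer configuration. As a hedged sketch (the property names are standard Kafka client configs; the values are illustrative only), the relevant knobs look like this:

Properties props = new Properties();
// Liveness window for the background heartbeat thread (KIP-62 separated this
// from processing liveness).
props.put("session.timeout.ms", "10000");
// Maximum allowed gap between poll() calls before the consumer leaves the group.
props.put("max.poll.interval.ms", "300000");
// Cap the batch size so each poll()'s worth of processing stays well inside that gap.
props.put("max.poll.records", "500");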
 
Example #6
Source File: ConsumerPollInterceptor.java    From pinpoint with Apache License 2.0
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (isDebug) {
        logger.afterInterceptor(target, args, result, throwable);
    }

    if (!(target instanceof RemoteAddressFieldAccessor)) {
        return;
    }

    String remoteAddress = ((RemoteAddressFieldAccessor) target)._$PINPOINT$_getRemoteAddress();
    if (StringUtils.isEmpty(remoteAddress)) {
        remoteAddress = KafkaConstants.UNKNOWN;
    }

    if (result instanceof ConsumerRecords) {
        Iterator consumerRecordIterator = ((ConsumerRecords) result).iterator();
        while (consumerRecordIterator.hasNext()) {
            Object consumerRecord = consumerRecordIterator.next();
            if (consumerRecord instanceof RemoteAddressFieldAccessor) {
                ((RemoteAddressFieldAccessor) consumerRecord)._$PINPOINT$_setRemoteAddress(remoteAddress);
            }
        }
    }
}
 
Example #7
Source File: KafkaLegacyBrokerIT.java    From apm-agent-java with Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getTransactions().size() == 2);
        int expectedSpans = (testScenario == TestScenario.NO_CONTEXT_PROPAGATION) ? 2 : 4;
        await().atMost(500, MILLISECONDS).until(() -> reporter.getSpans().size() == expectedSpans);
    }
    //noinspection deprecation - this poll overload is deprecated in newer clients, but enables testing of old ones
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
 
Example #8
Source File: ConsumerPoolTest.java    From localization_nifi with Apache License 2.0
@Test
public void validatePoolBatchCreatePollClose() throws Exception {
    final byte[][] firstPassValues = new byte[][]{
        "Hello-1".getBytes(StandardCharsets.UTF_8),
        "Hello-2".getBytes(StandardCharsets.UTF_8),
        "Hello-3".getBytes(StandardCharsets.UTF_8)
    };
    final ConsumerRecords<byte[], byte[]> firstRecs = createConsumerRecords("foo", 1, 1L, firstPassValues);

    when(consumer.poll(anyLong())).thenReturn(firstRecs, createConsumerRecords("nifi", 0, 0L, new byte[][]{}));
    try (final ConsumerLease lease = testDemarcatedPool.obtainConsumer(mockSession)) {
        lease.poll();
        lease.commit();
    }
    testDemarcatedPool.close();
    verify(mockSession, times(1)).create();
    verify(mockSession, times(1)).commit();
    final PoolStats stats = testDemarcatedPool.getPoolStats();
    assertEquals(1, stats.consumerCreatedCount);
    assertEquals(1, stats.consumerClosedCount);
    assertEquals(1, stats.leasesObtainedCount);
}
 
Example #9
Source File: KafkaRecordsConsumerTest.java    From synapse with Apache License 2.0
@Test
public void shouldDispatchMessage() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());

    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    final ConsumerRecords<String,String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0),
            singletonList(record))
    );
    consumer.apply(records);

    // then
    verify(dispatcher).accept(of(Key.of("key"), of(fromPosition("0", "42")), "payload"));
}
 
Example #10
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    System.out.println("before:" + records);
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            if (now - record.timestamp() < EXPIRE_INTERVAL) {
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
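For the interceptor above to run, it has to be registered on the consumer. A minimal sketch of the wiring via the standard interceptor.classes setting (bootstrap servers, group id, and deserializers omitted, as in the other examples):

Properties props = new Properties();
props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptorTTL.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// Every batch returned by consumer.poll() now passes through onConsume(),
// which drops records older than EXPIRE_INTERVAL.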
 
Example #11
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
public <K, V> List<Message<K, V>> readMessages(final String topicName, final int maxPoll, final MessageExtractor<K, V> messageExtractor) {
    final Properties props = new Properties();
    props.put("bootstrap.servers", brokerString);
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", ByteArrayDeserializer.class.getName());
    props.put("value.deserializer", ByteArrayDeserializer.class.getName());
    props.put("max.poll.records", String.valueOf(maxPoll));
    try (final KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props)) {
        kafkaConsumer.subscribe(Collections.singletonList(topicName));
        kafkaConsumer.poll(0); // dummy poll
        kafkaConsumer.seekToBeginning(Collections.singletonList(new TopicPartition(topicName, 0)));
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10000);
        final List<Message<K, V>> messages = new ArrayList<>();
        for (ConsumerRecord<byte[], byte[]> record : records) {
            messages.add(messageExtractor.extract(record));
        }
        return messages;
    }
}
 
Example #12
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private void testExceptionProcessingByFunction(String topic, LiKafkaConsumer<byte[], byte[]> consumer,
    BiConsumer<LiKafkaConsumer<byte[], byte[]>, TopicPartition> testFunction) throws Exception {
  try {
    consumer.subscribe(Collections.singleton(topic));
    ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
    while (records.isEmpty()) {
      records = consumer.poll(Duration.ofMillis(10));
    }
    assertEquals(records.count(), 4, "Four valid messages should be returned");
    assertEquals(records.iterator().next().offset(), 2L, "The offset of the first message should be 2.");
    assertEquals(consumer.position(new TopicPartition(topic, 0)), 7L, "The position should be 7");

    testFunction.accept(consumer, new TopicPartition(topic, 0));
  } finally {
    consumer.close();
  }
}
 
Example #13
Source File: RecordFilterInterceptorTest.java    From kafka-webview with MIT License
private ConsumerRecords createConsumerRecords(final int count) {
    final String topic = "MyTopic";
    final int partition = 0;

    final Map<TopicPartition, List<ConsumerRecord>> recordsMap = new HashMap<>();
    final TopicPartition topicPartition = new TopicPartition(topic, partition);
    final List<ConsumerRecord> consumerRecords = new ArrayList<>();

    for (int x = 0; x < count; x++) {
        consumerRecords.add(
            new ConsumerRecord<Object, Object>(topic, partition, x, "Key" + x, "Value" + x)
        );
    }
    recordsMap.put(topicPartition, consumerRecords);

    return new ConsumerRecords(recordsMap);
}
 
Example #14
Source File: KafkaLegacyClientIT.java    From apm-agent-java with Apache License 2.0
private void sendTwoRecordsAndConsumeReplies() {
    final StringBuilder callback = new StringBuilder();
    ProducerRecord<String, String> record1 = new ProducerRecord<>(REQUEST_TOPIC, 0, REQUEST_KEY, FIRST_MESSAGE_VALUE);
    ProducerRecord<String, String> record2 = new ProducerRecord<>(REQUEST_TOPIC, REQUEST_KEY, SECOND_MESSAGE_VALUE);
    producer.send(record1);
    producer.send(record2, (metadata, exception) -> callback.append("done"));
    if (testScenario != TestScenario.IGNORE_REQUEST_TOPIC) {
        await().atMost(2000, MILLISECONDS).until(() -> reporter.getSpans().size() == 2);
    }
    ConsumerRecords<String, String> replies = replyConsumer.poll(2000);
    assertThat(callback).isNotEmpty();
    assertThat(replies.count()).isEqualTo(2);
    Iterator<ConsumerRecord<String, String>> iterator = replies.iterator();
    assertThat(iterator.next().value()).isEqualTo(FIRST_MESSAGE_VALUE);
    assertThat(iterator.next().value()).isEqualTo(SECOND_MESSAGE_VALUE);
    // this is required in order to end transactions related to the record iteration
    assertThat(iterator.hasNext()).isFalse();
}
 
Example #15
Source File: KafkaAdaptorConsumer.java    From pulsar-java-tutorial with Apache License 2.0
@Override
public void run() {
    try {
        consumer.subscribe(topics);

        log.info("Consumer successfully subscribed to topics {}", topics);

        ConsumerRecords<Integer, String> records = consumer.poll(Long.MAX_VALUE);
        records.forEach(record -> {
            log.info("Received record with a key of {} and a value of {}", record.key(), record.value());
        });
    } catch (WakeupException e) {
        // Ignore
    } finally {
        consumer.commitSync();
        log.info("Consumer for topics {} temporarily closed", topics);
        this.run();
    }
}
 
Example #16
Source File: TestUtils.java    From uReplicator with Apache License 2.0
public static List<ConsumerRecord<Byte[], Byte[]>> consumeMessage(String bootstrapServer,
    String topicName,
    int timeoutMs
) throws InterruptedException {

  long time = new Date().getTime();
  Consumer<Byte[], Byte[]> consumer = createConsumer(bootstrapServer);
  consumer.subscribe(Collections.singletonList(topicName));

  List<ConsumerRecord<Byte[], Byte[]>> result = new ArrayList<>();
  while ((new Date().getTime()) - time < timeoutMs) {
    ConsumerRecords<Byte[], Byte[]> records = consumer.poll(1000);
    Iterator<ConsumerRecord<Byte[], Byte[]>> iterator = records.iterator();
    while (iterator.hasNext()) {
      result.add(iterator.next());
    }
    Thread.sleep(300);
  }
  consumer.close();
  return result;
}
 
Example #17
Source File: KafkaCanalConnector.java    From canal with Apache License 2.0
@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}
 
Example #18
Source File: CheckFlowLineHandler.java    From DBus with Apache License 2.0
private void secondStep(BufferedWriter bw, KafkaConsumer<String, byte[]> consumer, long offset) throws Exception {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 15) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumer.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                if (record.offset() >= offset) {
                    isOk = true;
                    bw.write("data arrive at topic: " + record.topic());
                    bw.newLine();
                    break;
                }
            }
        }
    } catch (Exception e) {
        bw.write("auto check table second step error: " + e.getMessage());
        bw.newLine();
        throw new RuntimeException("auto check table second step error", e);
    }

    if (!isOk) {
        bw.write("flow line second step time out");
        throw new RuntimeException("flow line second step time out");
    }
}
 
Example #19
Source File: Handover.java    From flink with Apache License 2.0
/**
 * Polls the next element from the Handover, possibly blocking until the next element is
 * available. This method behaves similar to polling from a blocking queue.
 *
 * <p>If an exception was handed in by the producer ({@link #reportError(Throwable)}), then
 * that exception is thrown rather than an element being returned.
 *
 * @return The next element (buffer of records, never null).
 *
 * @throws ClosedException Thrown if the Handover was {@link #close() closed}.
 * @throws Exception Rethrows exceptions from the {@link #reportError(Throwable)} method.
 */
@Nonnull
public ConsumerRecords<byte[], byte[]> pollNext() throws Exception {
	synchronized (lock) {
		while (next == null && error == null) {
			lock.wait();
		}

		ConsumerRecords<byte[], byte[]> n = next;
		if (n != null) {
			next = null;
			lock.notifyAll();
			return n;
		}
		else {
			ExceptionUtils.rethrowException(error, error.getMessage());

			// this statement cannot be reached since the above method always throws an exception
			// this is only here to silence the compiler and any warnings
			return ConsumerRecords.empty();
		}
	}
}
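To show how the two ends of the Handover meet, here is a hedged sketch of the surrounding threading. It assumes a running flag, a KafkaConsumer<byte[], byte[]> consumer, and the produce()/reportError() methods of this class (produce() is shown in Example #26); it is illustrative, not Flink's actual fetcher code:

// Fetcher thread: pushes each polled batch into the handover, one at a time.
Thread fetcher = new Thread(() -> {
    try {
        while (running) {
            handover.produce(consumer.poll(Duration.ofMillis(100)));
        }
    } catch (Throwable t) {
        handover.reportError(t); // surfaces in pollNext() on the consuming side
    }
});
fetcher.start();

// Processing thread: blocks until the next batch (or the reported error) arrives.
ConsumerRecords<byte[], byte[]> batch = handover.pollNext();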
 
Example #20
Source File: ProtostuffDeserializer.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    String brokerList = "192.168.0.101:9092";
    String topic = "topic.serialization";
    String groupId = "group.demo";
    Properties properties = new Properties();
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ProtostuffDeserializer.class.getName());
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

    KafkaConsumer<String, Company> consumer = new KafkaConsumer<String, Company>(properties);
    consumer.subscribe(Collections.singletonList(topic));

    while (true) {
        ConsumerRecords<String, Company> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord record : records) {
            System.out.println(String.format("%s-%s-%s-%s",
                    record.topic(), record.partition(), record.offset(), record.value()));
            // successful deserialization; output: topic.serialization-0-1-Company(name=whirly, address=中国)
        }
    }
}
 
Example #21
Source File: KafkaRecordsConsumerTest.java    From synapse with Apache License 2.0
@Test
public void shouldUpdateDurationBehindHandler() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());

    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    final ConsumerRecords<String,String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0),
            singletonList(record))
    );
    consumer.apply(records);

    // then
    final long secondsBehind = getSecondsBehind("0");
    assertThat(secondsBehind, is(lessThanOrEqualTo(2L)));
}
 
Example #22
Source File: OffsetCommitSyncBatch.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.out.println(record.offset() + " : " + record.value());
        }
        if (buffer.size() >= minBatchSize) {
            //do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
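The no-argument commitSync() above commits the offsets of everything returned by the last poll, which can run ahead of the buffered batch. A sketch of the per-partition overload under the same setup (note the +1: the committed offset is the position of the next record to read):

for (TopicPartition tp : records.partitions()) {
    List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
    long lastConsumed = partitionRecords.get(partitionRecords.size() - 1).offset();
    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(lastConsumed + 1)));
}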
 
Example #23
Source File: GeoLocationConsumer.java    From Microservices-Deployment-Cookbook with MIT License
public void run() {
	Properties props = new Properties();
	props.put("bootstrap.servers", "192.168.99.100:9092");
	props.put("group.id", "geolocationConsumer");
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());

	try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
		consumer.subscribe(Arrays.asList("geolocations"));
		while (true) {
			ConsumerRecords<String, String> records = consumer.poll(100);
			for (ConsumerRecord<String, String> record : records) {
				System.out.printf("offset = %d, key = %s, value = %s%n", 
						record.offset(), 
						record.key(), 
						record.value());

				REPO.addGeoLocation(GSON.fromJson(record.value(), GeoLocation.class));
			}
		}
	} catch (Exception e) {
		System.err.println("Error while consuming geolocations. Details: " + e.getMessage());
	}
}
 
Example #24
Source File: SimpleKafkaConsumer.java    From joyqueue with Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfigs.BOOTSTRAP);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("test_topic_0"));

    while (true) {
//        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000 * 1));
        ConsumerRecords<String, String> records = consumer.poll(1000 * 1);
        for (ConsumerRecord<String, String> record : records) {
//            System.out.println(String.format("record, key: %s, value: %s, offset: %s, timestamp: %s", record.key(), record.value(), record.offset(), record.timestamp()));
            System.out.println(String.format("record, key: %s, value: %s, offset: %s", record.key(), record.value(), record.offset()));
        }
    }
}
 
Example #25
Source File: AvroEndpoint.java    From quarkus with Apache License 2.0
@GET
@Produces(MediaType.APPLICATION_JSON)
public JsonObject get() {
    final ConsumerRecords<Integer, Pet> records = consumer.poll(Duration.ofMillis(60000));
    if (records.isEmpty()) {
        return null;
    }
    Pet p = records.iterator().next().value();
    // We cannot serialize the returned Pet directly, it contains non-serializable object such as the schema.
    JsonObject result = new JsonObject();
    result.put("name", p.getName());
    result.put("color", p.getColor());
    return result;
}
 
Example #26
Source File: Handover.java    From flink with Apache License 2.0
/**
 * Hands over an element from the producer. If the Handover already has an element that was
 * not yet picked up by the consumer thread, this call blocks until the consumer picks up that
 * previous element.
 *
 * <p>This behavior is similar to a "size one" blocking queue.
 *
 * @param element The next element to hand over.
 *
 * @throws InterruptedException
 *                 Thrown, if the thread is interrupted while blocking for the Handover to be empty.
 * @throws WakeupException
 *                 Thrown, if the {@link #wakeupProducer()} method is called while blocking for
 *                 the Handover to be empty.
 * @throws ClosedException
 *                 Thrown if the Handover was closed or concurrently being closed.
 */
public void produce(final ConsumerRecords<byte[], byte[]> element)
		throws InterruptedException, WakeupException, ClosedException {

	checkNotNull(element);

	synchronized (lock) {
		while (next != null && !wakeupProducer) {
			lock.wait();
		}

		wakeupProducer = false;

		// if there is still an element, we must have been woken up
		if (next != null) {
			throw new WakeupException();
		}
		// if there is no error, then this is open and can accept this element
		else if (error == null) {
			next = element;
			lock.notifyAll();
		}
		// an error marks this as closed for the producer
		else {
			throw new ClosedException();
		}
	}
}
 
Example #27
Source File: TracingKafkaTest.java    From java-kafka-client with Apache License 2.0
@Test
public void testConsumerBuilderWithoutDecorators() throws InterruptedException {
  Producer<Integer, String> producer = createTracingProducer();
  producer.send(new ProducerRecord<>("messages", 1, "test"));

  producer.close();

  assertEquals(1, mockTracer.finishedSpans().size());

  ExecutorService executorService = Executors.newSingleThreadExecutor();
  final CountDownLatch latch = new CountDownLatch(1);

  executorService.execute(() -> {
    Consumer<Integer, String> consumer = createConsumerWithDecorators(new ArrayList());

    while (latch.getCount() > 0) {
      ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(100));
      for (ConsumerRecord<Integer, String> record : records) {
        SpanContext spanContext = TracingKafkaUtils
            .extractSpanContext(record.headers(), mockTracer);
        assertNotNull(spanContext);
        assertEquals("test", record.value());
        assertEquals((Integer) 1, record.key());

        consumer.commitSync();
        latch.countDown();
      }
    }
    consumer.close();
  });

  assertTrue(latch.await(30, TimeUnit.SECONDS));

  List<MockSpan> mockSpans = mockTracer.finishedSpans();

  MockSpan span = mockSpans.get(1);
  assertEquals("consumer", span.tags().get("span.kind"));
}
 
Example #28
Source File: KafkaStreamsDlqExampleTests.java    From spring-cloud-stream-samples with Apache License 2.0
@Test
public void testKafkaStreamsWordCountProcessor() {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
	DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	try {
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("words");
		template.sendDefault("foobar");
		ConsumerRecords<String, String> cr = KafkaTestUtils.getRecords(consumer);
		assertThat(cr.count()).isGreaterThanOrEqualTo(1);
	}
	finally {
		pf.destroy();
	}
}
 
Example #29
Source File: ITKafkaTracing.java    From brave with Apache License 2.0
void checkB3Unsampled(ConsumerRecords<String, String> records) {
  // Check that the injected context was not sampled
  assertThat(records)
    .extracting(ConsumerRecord::headers)
    .flatExtracting(TracingConsumerTest::lastHeaders)
    .hasSize(1)
    .allSatisfy(e -> {
      assertThat(e.getKey()).isEqualTo("b3");
      assertThat(e.getValue()).endsWith("-0");
    });
}