Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecords#forEach()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords#forEach(). Each example is taken from an open-source project; the source file, project, and license are noted above its code.
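All of the examples share the same basic shape: poll the consumer for a batch of records, then let ConsumerRecords#forEach visit every record in the batch across all fetched partitions. The minimal sketch below illustrates that pattern with the Duration-based poll API; the broker address, topic name, and group id are placeholder values, not taken from any project below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ForEachSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder connection settings; adjust for your cluster
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("my-topic")); // placeholder topic
            while (true) {
                // poll(Duration) supersedes the deprecated poll(long) used by several examples below
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                // forEach applies the given action to every fetched record, across all partitions
                records.forEach(record ->
                        System.out.printf("offset = %d, key = %s, value = %s%n",
                                record.offset(), record.key(), record.value()));
            }
        }
    }
}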
Example 1
Source File: ConsumerExample.java    From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @SuppressWarnings("resource")
    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
 
Example 2
Source File: KafkaTransportReceiver.java    From baleen with Apache License 2.0
@Override
protected String readFromQueue() throws IOException {
  // Kafka will provide a number of documents at once (depending on max.poll.records)

  // We'll check if we have any documents already, and if not then we poll to get more to refill
  // our queue.

  if (queue.isEmpty()) {

    // No documents in the queue, so ask Kafka for more...
    while (queue.isEmpty()) {
      final ConsumerRecords<String, String> records = consumer.poll(consumerReadTimeout);
      records.forEach(r -> queue.add(r.value()));
    }
  }

  // We know at this point that we have a non-empty queue to pull from
  return queue.poll();
}
 
Example 3
Source File: ClickStreamEnrichmentDriver.java    From registry with Apache License 2.0
private void consumeUserActivity() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
    props.put(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name(), SCHEMA_REGISTRY_URL);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "user-activity-reader");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

    try (KafkaConsumer<Long, UserActivity> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Collections.singleton(USER_ACTIVITY_TOPIC));
        while (true) {
            final ConsumerRecords<Long, UserActivity> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            consumerRecords.forEach(System.out::println);
        }
    }
}
 
Example 4
Source File: KafkaAdaptorConsumer.java    From pulsar-java-tutorial with Apache License 2.0
@Override
public void run() {
    try {
        consumer.subscribe(topics);

        log.info("Consumer successfully subscribed to topics {}", topics);

        ConsumerRecords<Integer, String> records = consumer.poll(Long.MAX_VALUE);
        records.forEach(record -> {
            log.info("Received record with a key of {} and a value of {}", record.key(), record.value());
        });
    } catch (WakeupException e) {
        // Ignore
    } finally {
        consumer.commitSync();
        log.info("Consumer for topics {} temporarily closed", topics);
        // Restart consumption by recursively re-entering run()
        this.run();
    }
}
 
Example 5
Source File: PeriodicNotificationExporterIT.java    From rya with Apache License 2.0
private Set<BindingSet> getBindingSetsFromKafka(final String topicName) {
    KafkaConsumer<String, BindingSet> consumer = null;

    try {
        consumer = makeBindingSetConsumer(topicName);
        final ConsumerRecords<String, BindingSet> records = consumer.poll(20000);  // Wait up to 20 seconds for a result to be published.

        final Set<BindingSet> bindingSets = new HashSet<>();
        records.forEach(x -> bindingSets.add(x.value()));

        return bindingSets;

    } catch (final Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example 6
Source File: SampleRawConsumer.java    From kafka-encryption with Apache License 2.0
@Override
public void run() {

    Properties consumerProperties = new Properties();
    consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "sampleraw");
    consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    try (KafkaConsumer<Long, String> consumer = new KafkaConsumer<Long, String>(
            consumerProperties,
            new LongDeserializer(),
            new StringDeserializer())) {

        consumer.subscribe(Collections.singleton("sampletopic"));
        for (; true; ) {
            ConsumerRecords<Long, String> records = consumer.poll(1000L);
            records.forEach(
                    record -> System.out.println(
                        "-------------------------------------------------------------\n" +
                        "raw record: key=" + record.key() + ", offset=" + record.offset() + ", value=" + record.value() +
                        "\n-------------------------------------------------------------\n\n"
                    )
            );
        }
    }
}
 
Example 7
Source File: SampleDecryptingConsumer.java    From kafka-encryption with Apache License 2.0
@Override
public void run() {

    // tag::consume[]
    // The key is embedded in each message
    PerRecordKeyProvider keyProvider = new PerRecordKeyProvider(masterKeyEncryption);

    // The payload is encrypted using AES
    AesGcmNoPaddingCryptoAlgorithm cryptoAlgorithm = new AesGcmNoPaddingCryptoAlgorithm();
    Decryptor decryptor = new DefaultDecryptor(keyProvider, cryptoAlgorithm);

    // Construct decrypting deserializer
    CryptoDeserializerFactory cryptoDeserializerFactory = new CryptoDeserializerFactory(decryptor);

    Properties consumerProperties = new Properties();
    consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "samplecrypted");
    consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    try (KafkaConsumer<Long, String> consumer = new KafkaConsumer<Long, String>(
            consumerProperties,
            new LongDeserializer(),
            cryptoDeserializerFactory.buildFrom(new StringDeserializer()))) {

        consumer.subscribe(Collections.singleton("sampletopic"));
        for (; true; ) {
            ConsumerRecords<Long, String> records = consumer.poll(1000L);
            records.forEach(
                    record -> System.out.println(
                    "-------------------------------------------------------------\n" +
                    "decrypted record: key=" + record.key() + ", offset=" + record.offset() + ", value=" + record.value() +
                    "\n-------------------------------------------------------------\n\n"
                    )
            );
        }
    }
    // end::consume[]
}
 
Example 8
Source File: SampleDecryptingConsumer.java    From kafka-encryption with Apache License 2.0
@Override
public void run() {

    // tag::consume[]
    // The key is embedded in each message

    Decryptor decryptor = new DefaultDecryptor(keyProvider, cryptoAlgorithm);

    // Construct decrypting deserializer
    CryptoDeserializerFactory cryptoDeserializerFactory = new CryptoDeserializerFactory(decryptor);

    Properties consumerProperties = new Properties();
    consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "samplecrypted");
    consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    try (KafkaConsumer<Long, String> consumer = new KafkaConsumer<Long, String>(
            consumerProperties,
            new LongDeserializer(),
            cryptoDeserializerFactory.buildFrom(new StringDeserializer()))) {

        consumer.subscribe(Collections.singleton("sampletopic"));
        for (; true; ) {
            ConsumerRecords<Long, String> records = consumer.poll(1000L);
            records.forEach(
                    record -> System.out.println(
                                "-------------------------------------------------------------\n" +
                                "decrypted record: key=" + record.key() + ", offset=" + record.offset() + ", value=" + record.value() +
                                "\n-------------------------------------------------------------\n\n")
            );
        }
    }
    // end::consume[]
}
 
Example 9
Source File: DemoConsumerAutoCommit.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) {
	args = new String[] { "kafka0:19092", "words", "group1", "consumer2" };
	if (args == null || args.length != 4) {
		System.err.println(
				"Usage:\n\tjava -jar kafka_consumer.jar ${bootstrap_server} ${topic_name} ${group_name} ${client_id}");
		System.exit(1);
	}
	String bootstrap = args[0];
	String topic = args[1];
	String groupid = args[2];
	String clientid = args[3];

	Properties props = new Properties();
	props.put("bootstrap.servers", bootstrap);
	props.put("group.id", groupid);
	props.put("client.id", clientid);
	props.put("enable.auto.commit", "true");
	props.put("auto.commit.interval.ms", "1000");
       props.put("auto.offset.reset", "earliest");
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	consumer.subscribe(Arrays.asList(topic));
	while (true) {
		ConsumerRecords<String, String> records = consumer.poll(100);
		records.forEach(record -> {
			System.out.printf("client : %s , topic: %s , partition: %d , offset = %d, key = %s, value = %s%n", clientid, record.topic(),
					record.partition(), record.offset(), record.key(), record.value());
		});
	}
}
 
Example 10
Source File: AssignedPartitions.java    From ja-micro with Apache License 2.0
void enqueue(ConsumerRecords<String, byte[]> records) {
    records.forEach((record) -> {
        TopicPartition partitionKey = new TopicPartition(record.topic(), record.partition());

        PartitionProcessor processor = processors.get(partitionKey);
        if (processor == null) {
            processor = assignNewPartition(partitionKey);
        }

        processor.enqueue(record);
    });
}
 
Example 11
Source File: SchemaRegistryConsumer.java    From blog with MIT License
public static void main(String[] args) {

    /** Set the Consumer properties */
    Properties properties = new Properties();
    /** Kafka bootstrap servers */
    properties.put(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
    /** Key deserializer class */
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    /** Value deserializer class */
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);
    /** Consumer group */
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_schema");

    /** Set the schema.registry URL */
    properties.put("schema.registry.url", "http://node-160:8081");

    /** Create the Consumer */
    KafkaConsumer<String, GenericRecord> consumer = new KafkaConsumer<>(properties);

    /** Subscribe to the topic; a Pattern.compile("") regular expression also works */
    consumer.subscribe(Arrays.asList("topic01"));

    /** Consume the message stream */
    try {
      while (true) {
        /** Poll, waiting up to the given timeout for messages */
        ConsumerRecords<String, GenericRecord> consumerRecords =
            consumer.poll(Duration.ofSeconds(1));
        consumerRecords.forEach(
            r ->
                System.out.printf(
                    "partition = %d, offset = %d, key = %s, value = %s%n",
                    r.partition(), r.offset(), r.key(), r.value()));
      }
    } finally {
      /** Close the Consumer */
      consumer.close();
    }
}
 
Example 12
Source File: Consumer.java    From blog with MIT License
public static void main(String[] args) {
  /** Set the Consumer properties */
  Properties properties = new Properties();
  /** Kafka bootstrap servers */
  properties.put(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
  /** Key deserializer class */
  properties.put(
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  /** Value deserializer class */
  properties.put(
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  /** Consumer group */
  properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_2");

  /** Create the Consumer */
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

  /** Subscribe to the topic; a Pattern.compile("") regular expression also works */
  consumer.subscribe(Arrays.asList("topic01"));

  /** Consume the message stream */
  try {
    while (true) {
      /** Poll, waiting up to the given timeout for messages */
      ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
      consumerRecords.forEach(
          r ->
              System.out.printf(
                  "partition = %d, offset = %d, key = %s, value = %s%n",
                  r.partition(), r.offset(), r.key(), r.value()));
    }
  } finally {
    /** Close the Consumer */
    consumer.close();
  }
}
 
Example 13
Source File: DemoConsumerAssign.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) {
	args = new String[] { "kafka0:9092", "topic1", "group1", "consumer3" };
	if (args == null || args.length != 4) {
		System.err.println(
				"Usage:\n\tjava -jar kafka_consumer.jar ${bootstrap_server} ${topic_name} ${group_name} ${client_id}");
		System.exit(1);
	}
	String bootstrap = args[0];
	String topic = args[1];
	String groupid = args[2];
	String clientid = args[3];

	Properties props = new Properties();
	props.put("bootstrap.servers", bootstrap);
	props.put("group.id", groupid);
	props.put("client.id", clientid);
	props.put("enable.auto.commit", "true");
	props.put("auto.commit.interval.ms", "1000");
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());
	props.put("auto.offset.reset", "earliest");
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	consumer.assign(Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1)));
	while (true) {
		ConsumerRecords<String, String> records = consumer.poll(100);
		records.forEach(record -> {
			System.out.printf("client : %s , topic: %s , partition: %d , offset = %d, key = %s, value = %s%n", clientid, record.topic(),
					record.partition(), record.offset(), record.key(), record.value());
		});
	}
}
 
Example 14
Source File: RestartTest.java    From kbear with Apache License 2.0
protected void consumeMessages(Consumer<String, String> consumer, int defaultMessageCount, String topic,
        int topicMessageCount) {
    long onStart = System.currentTimeMillis();
    Duration pollTimeout2 = Duration.ofMillis(_pollTimeout);
    Map<String, AtomicInteger> actualTopicMessageCount = new HashMap<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(pollTimeout2);
        records.forEach(r -> {
            actualTopicMessageCount.computeIfAbsent(r.topic(), k -> new AtomicInteger()).incrementAndGet();
            System.out.printf("\nrecord: %s\n", r);
        });

        AtomicBoolean hasInComplete = new AtomicBoolean();
        _topics.forEach(t -> {
            AtomicInteger actualCount = actualTopicMessageCount.computeIfAbsent(t, k -> new AtomicInteger());
            int messageCount = Objects.equals(t, topic) ? topicMessageCount : defaultMessageCount;
            if (actualCount.get() < messageCount)
                hasInComplete.set(true);
        });

        if (!hasInComplete.get())
            break;

        long timeElapsed = System.currentTimeMillis() - onStart;
        if (timeElapsed >= _waitTimeout)
            Assert.fail("consume timeout, only consumed: " + actualTopicMessageCount
                    + " messages, expected messageCount: " + _messageCount + ", expected " + topic
                    + " messageCount: " + topicMessageCount);
    }
}
 
Example 15
Source File: KafkaQueryChangeLog.java    From rya with Apache License 2.0
private void maybePopulateCache() {
    // If the cache isn't initialized yet, or it is empty, then check to see if there is more to put into it.
    if (iterCache == null || !iterCache.hasNext()) {
        final ConsumerRecords<?, QueryChange> records = consumer.poll(3000L);
        final List<ChangeLogEntry<QueryChange>> changes = new ArrayList<>();
        records.forEach(record -> changes.add(new ChangeLogEntry<>(record.offset(), record.value())));
        iterCache = changes.iterator();
    }
}
 
Example 16
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0
@Test
public void testProducerConsumerMixedSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerMixedSchemaWithPulsarKafkaClient";

    Schema<String> keySchema = new PulsarKafkaSchema<>(new StringSerializer(), new StringDeserializer());
    JSONSchema<Foo> valueSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<String, Foo> consumer = new KafkaConsumer<>(props, keySchema, valueSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<String, Foo> producer = new KafkaProducer<>(props, keySchema, valueSchema);

    for (int i = 0; i < 10; i++) {
        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, "hello" + i, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<String, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                String key = record.key();
                Assert.assertEquals(key, "hello" + received.get());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
 
Example 17
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0
@Test(timeOut = 30000)
public void testSimpleProducerConsumer() throws Exception {
    String topic = "persistent://public/default/testSimpleProducerConsumer";

    Properties producerProperties = new Properties();
    producerProperties.put("bootstrap.servers", getPlainTextServiceUrl());
    producerProperties.put("key.serializer", IntegerSerializer.class.getName());
    producerProperties.put("value.serializer", StringSerializer.class.getName());
    Producer<Integer, String> producer = new KafkaProducer<>(producerProperties);

    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", getPlainTextServiceUrl());
    consumerProperties.put("group.id", "my-subscription-name");
    consumerProperties.put("key.deserializer", IntegerDeserializer.class.getName());
    consumerProperties.put("value.deserializer", StringDeserializer.class.getName());
    consumerProperties.put("enable.auto.commit", "true");
    Consumer<Integer, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topic));

    List<Long> offsets = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
        RecordMetadata md = producer.send(new ProducerRecord<Integer, String>(topic, i, "hello-" + i)).get();
        offsets.add(md.offset());
        log.info("Published message at {}", Long.toHexString(md.offset()));
    }

    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            assertEquals(record.key().intValue(), received.get());
            assertEquals(record.value(), "hello-" + received.get());
            assertEquals(record.offset(), offsets.get(received.get()).longValue());

            received.incrementAndGet();
        });

        consumer.commitSync();
    }

    consumer.close();
}
 
Example 18
Source File: DuplicatePublishingDetector.java    From light-eventuate-4j with Apache License 2.0
private Optional<BinlogFileOffset> fetchMaxOffsetFor(String destinationTopic) {
  String subscriberId = "duplicate-checker-" + destinationTopic + "-" + System.currentTimeMillis();
  Properties consumerProperties = ConsumerPropertiesFactory.makeConsumerProperties(config, subscriberId);
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);

  List<PartitionInfo> partitions = EventuateKafkaConsumer.verifyTopicExistsBeforeSubscribing(consumer, destinationTopic);

  List<TopicPartition> topicPartitionList = partitions.stream().map(p -> new TopicPartition(destinationTopic, p.partition())).collect(toList());
  consumer.assign(topicPartitionList);
  consumer.poll(0);

  logger.info("Seeking to end");

  try {
    consumer.seekToEnd(topicPartitionList);
  } catch (IllegalStateException e) {
    logger.error("Error seeking " + destinationTopic, e);
    return Optional.empty();
  }
  List<PartitionOffset> positions = topicPartitionList.stream()
          .map(tp -> new PartitionOffset(tp.partition(), consumer.position(tp) - 1))
          .filter(po -> po.offset >= 0)
          .collect(toList());

  logger.info("Seeking to positions=" + positions);

  positions.forEach(po -> {
    consumer.seek(new TopicPartition(destinationTopic, po.partition), po.offset);
  });

  logger.info("Polling for records");

  List<ConsumerRecord<String, String>> records = new ArrayList<>();
  while (records.size()<positions.size()) {
    ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
    consumerRecords.forEach(records::add);
  }

  logger.info("Got records: {}", records.size());
  Optional<BinlogFileOffset> max = StreamSupport.stream(records.spliterator(), false).map(record -> {
    logger.info(String.format("got record: %s %s %s", record.partition(), record.offset(), record.value()));
    return JSonMapper.fromJson(record.value(), PublishedEvent.class).getBinlogFileOffset();
  }).filter(binlogFileOffset -> binlogFileOffset!=null).max((blfo1, blfo2) -> blfo1.isSameOrAfter(blfo2) ? 1 : -1);
  consumer.close();
  return max;
}
 
Example 19
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0
@Test
public void testProducerConsumerJsonSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerJsonSchemaWithPulsarKafkaClient";

    JSONSchema<Bar> barSchema = JSONSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    JSONSchema<Foo> fooSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
 
Example 20
Source File: SpecificClientIntegrationITCase.java    From kiqr with Apache License 2.0
@BeforeClass
public static void produceMessages() throws Exception{
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", KAFKA_HOST + ":" + KAFKA_PORT);
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.LongSerializer");
    producerProps.put("linger.ms", 0);


    try(KafkaProducer<String, Long> producer = new KafkaProducer<>(producerProps)){
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 0L, "key1", 1L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 100L, "key1", 2L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 100000L, "key1", 3L));


        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 0L, "key2", 4L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 100000L, "key2", 5L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 100001L, "key2", 6L));

        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 0L, "key3", 7L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 50000L, "key3", 8L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 100001L, "key3", 9L));


        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 0L, "key4", 10L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 1L, "key4", 11L));
        producer.send(new ProducerRecord<String, Long>(TOPIC, 0, 2L, "key4", 12L));

    }

    CountDownLatch cdl = new CountDownLatch(12);


    Properties consumerProps = new Properties();
    consumerProps.put("bootstrap.servers",  KAFKA_HOST + ":" + KAFKA_PORT);
    consumerProps.put("group.id", UUID.randomUUID().toString());
    consumerProps.put("enable.auto.commit", "true");
    consumerProps.put("auto.offset.reset", "earliest");
    consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.LongDeserializer");


    Runnable consumerRunnable = () -> {
        KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(consumerProps);

        consumer.subscribe(Collections.singleton(TOPIC));

        int tryCount = 10;
        while(true){
            ConsumerRecords<String, Long> records = consumer.poll(500);
            records.forEach(rec -> cdl.countDown());

            tryCount--;
            if(cdl.getCount() == 0){
                consumer.close();
                return;
            } else if(tryCount == 0){
                throw new RuntimeException("times up");
            }
        }
    };

    consumerRunnable.run();

    cdl.await(10000, TimeUnit.MILLISECONDS);


    KStreamBuilder builder = new KStreamBuilder();
    KTable<String, Long> kv = builder.table(Serdes.String(), Serdes.Long(), TOPIC, "kv");

    kv.toStream().groupByKey().count(TimeWindows.of(10000L), "window");

    Properties streamProps = new Properties();
    streamProps.put(StreamsConfig.APPLICATION_ID_CONFIG, UUID.randomUUID().toString());
    streamProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,  KAFKA_HOST + ":" + KAFKA_PORT);
    streamProps.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamProps.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamProps.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName());

    CountDownLatch streamCdl = new CountDownLatch(2);


    RestKiqrServerVerticle.Builder verticleBuilder = RestKiqrServerVerticle.Builder.serverBuilder(builder, streamProps);
    RuntimeVerticle.Builder builder1 = verticleBuilder.withPort(44321).withStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING) streamCdl.countDown();
        System.out.println(oldState + " - " + newState);
    });

    AbstractVerticle verticle = verticleBuilder.build();

    CountDownLatch verticleCdl = new CountDownLatch(1);
    VERTX.deployVerticle(verticle, handler -> {
        verticleCdl.countDown();
    });

    streamCdl.await(100000, TimeUnit.MILLISECONDS);
    verticleCdl.await(100000, TimeUnit.MILLISECONDS);

}