Java Code Examples for org.apache.kafka.clients.consumer.Consumer#poll()

The following examples show how to use org.apache.kafka.clients.consumer.Consumer#poll(). Each example is drawn from an open-source project; the source file, project, and license are noted above each example.
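Before the project examples, here is a minimal, self-contained sketch of the typical poll loop. The broker address localhost:9092 and the topic my-topic are placeholders, not taken from any of the projects below:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PollSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "example-group");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("my-topic")); // placeholder topic
            while (true) {
                // poll(Duration) blocks until records arrive or the timeout elapses
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                records.forEach(record -> System.out.println(record.value()));
            }
        }
    }
}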
Example 1
Source File: ConsumerExample.java    From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @SuppressWarnings("resource")
    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        // Wait up to 100 ms for records; poll(long) is deprecated in newer clients in favor of poll(Duration)
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Synchronously commit the offsets returned by the last poll
        consumer.commitSync();
    }
}
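Note that Example 1 uses the poll(long) overload, which is deprecated since Kafka clients 2.0 in favor of poll(Duration). A sketch of the same loop against the newer overload, assuming the setup above and an added import of java.time.Duration:

while (true) {
    // Wait up to 100 ms for records
    ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(100));
    records.forEach(record -> log.info("Received record: {}", record));

    // Synchronously commit the offsets returned by the last poll
    consumer.commitSync();
}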
 
Example 2
Source File: KafkaTestUtil.java    From rya with Apache License 2.0
/**
 * Polls a {@link Consumer} until it reaches the target number of results or exhausts the
 * maximum number of poll attempts.
 *
 * @param pollMs - How long each poll will wait for records, in milliseconds.
 * @param pollIterations - The maximum number of polls that will be attempted.
 * @param targetSize - The number of results to read before stopping.
 * @param consumer - The consumer that will be polled.
 * @return The results that were read from the consumer.
 * @throws Exception If the poll failed.
 */
public static <K, V> List<V> pollForResults(
        final int pollMs,
        final int pollIterations,
        final int targetSize,
        final Consumer<K, V> consumer) throws Exception {
    requireNonNull(consumer);

    final List<V> values = new ArrayList<>();

    int i = 0;
    while(values.size() < targetSize && i < pollIterations) {
        for(final ConsumerRecord<K, V> record : consumer.poll(pollMs)) {
            values.add( record.value() );
        }
        i++;
    }

    return values;
}
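A hypothetical call site for this helper, assuming a consumer that is already subscribed to the topic under test (the numbers here are illustrative):

// Poll in 500 ms slices, at most 10 times, stopping once 5 values have been read
final List<String> values = KafkaTestUtil.pollForResults(500, 10, 5, consumer);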
 
Example 3
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Returns up to `maxMessages` messages by subscribing the provided consumer to the given
 * topic and polling until either the wait time elapses or the message limit is reached.
 *
 * @param topic          Kafka topic to read messages from
 * @param consumer       Kafka consumer
 * @param waitTime       Maximum wait time in milliseconds
 * @param maxMessages    Maximum number of messages to read via the consumer
 * @return The KeyValue elements retrieved via the consumer
 */
private static <K, V> List<KeyValue<K, V>> readKeyValues(final String topic,
                                                         final Consumer<K, V> consumer, final long waitTime, final int maxMessages) {
  final List<KeyValue<K, V>> consumedValues;
  consumer.subscribe(Collections.singletonList(topic));
  final int pollIntervalMs = 100;
  consumedValues = new ArrayList<>();
  int totalPollTimeMs = 0;
  while (totalPollTimeMs < waitTime &&
          continueConsuming(consumedValues.size(), maxMessages)) {
    totalPollTimeMs += pollIntervalMs;
    final ConsumerRecords<K, V> records = consumer.poll(pollIntervalMs);
    for (final ConsumerRecord<K, V> record : records) {
      consumedValues.add(new KeyValue<>(record.key(), record.value()));
    }
  }
  return consumedValues;
}
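The continueConsuming helper is referenced but not shown above; a plausible implementation, assuming a non-positive maxMessages means "no limit":

private static boolean continueConsuming(final int messagesConsumed, final int maxMessages) {
    // Keep consuming while no limit is set, or while the limit has not yet been reached
    return maxMessages <= 0 || messagesConsumed < maxMessages;
}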
 
Example 4
Source File: KafkaNativeSerializationApplicationTests.java    From spring-cloud-stream-samples with Apache License 2.0
@Test
public void testSendReceive() {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
	senderProps.put("value.serializer", StringSerializer.class);
	DefaultKafkaProducerFactory<byte[], String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<byte[], String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic(INPUT_TOPIC);
	template.sendDefault("foo");

	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps(GROUP_NAME, "false", embeddedKafka.getEmbeddedKafka());
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	consumerProps.put("value.deserializer", MyJsonDeserializer.class);
	DefaultKafkaConsumerFactory<byte[], Person> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

	Consumer<byte[], Person> consumer = cf.createConsumer();
	consumer.subscribe(Collections.singleton(OUTPUT_TOPIC));
	ConsumerRecords<byte[], Person> records = consumer.poll(Duration.ofSeconds(10));
	consumer.commitSync();

	assertThat(records.count()).isEqualTo(1);
	assertThat(records.iterator().next().value().getName()).isEqualTo("foo");
}
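MyJsonDeserializer is specific to this sample project and not shown. A minimal Jackson-based sketch of what such a deserializer might look like, assuming Person is the sample's plain POJO and a clients version where Deserializer's configure() and close() have default implementations:

import java.io.IOException;
import java.io.UncheckedIOException;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.common.serialization.Deserializer;

public class MyJsonDeserializer implements Deserializer<Person> {
    private final ObjectMapper mapper = new ObjectMapper();

    @Override
    public Person deserialize(String topic, byte[] data) {
        try {
            // Map the raw JSON bytes onto the Person POJO
            return data == null ? null : mapper.readValue(data, Person.class);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}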
 
Example 5
Source File: TracingConsumerTest.java    From brave with Apache License 2.0
@Test
public void should_call_wrapped_poll_and_close_spans() {
  consumer.addRecord(consumerRecord);
  Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);
  tracingConsumer.poll(10);

  // offset changed
  assertThat(consumer.position(topicPartition)).isEqualTo(2L);


  MutableSpan consumerSpan = spans.get(0);
  assertThat(consumerSpan.kind()).isEqualTo(CONSUMER);
  assertThat(consumerSpan.name()).isEqualTo("poll");
  assertThat(consumerSpan.tags())
    .containsOnly(entry("kafka.topic", "myTopic"));
}
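For context, the kafkaTracing object in this test comes from brave's kafka-clients instrumentation. A typical setup, assuming an existing Tracing instance, looks roughly like this:

import brave.Tracing;
import brave.kafka.clients.KafkaTracing;

Tracing tracing = Tracing.newBuilder().localServiceName("my-consumer").build();
KafkaTracing kafkaTracing = KafkaTracing.create(tracing);
// Wrapping the consumer makes each poll create and finish consumer spans
Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);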
 
Example 6
Source File: KafkaProducer09IT.java    From datacollector with Apache License 2.0
private void verify(
    final String topic,
    final int numMessages,
    final String metadataBrokerList,
    String message
) {
  Properties props = new Properties();
  props.put("bootstrap.servers", metadataBrokerList);
  props.put("group.id", "test");
  props.put("enable.auto.commit", "false");
  props.put("auto.commit.interval.ms", "1000");
  props.put("auto.offset.reset", "earliest");
  props.put("session.timeout.ms", "30000");
  props.put(KafkaConstants.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  props.put(KafkaConstants.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Consumer<String, String> consumer = new KafkaConsumer<>(props);
  consumer.subscribe(Collections.singletonList(topic));
  List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
  while (buffer.isEmpty()) {
    // Wait up to one second for records (deprecated poll(long) overload)
    ConsumerRecords<String, String> records = consumer.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
      buffer.add(record);
    }
  }
  Assert.assertEquals(numMessages, buffer.size());
  Assert.assertEquals(message, buffer.get(0).value());
}
 
Example 7
Source File: BasicTest.java    From kbear with Apache License 2.0
protected void consumeMessages(Consumer<String, String> consumer, Map<String, Integer> topicMessageCount,
        long waitTimeout, long pollTimeout) {
    long onStart = System.currentTimeMillis();
    Duration pollTimeout2 = Duration.ofMillis(pollTimeout);
    Map<String, AtomicInteger> actualTopicMessageCount = new HashMap<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(pollTimeout2);
        records.forEach(r -> {
            actualTopicMessageCount.computeIfAbsent(r.topic(), k -> new AtomicInteger()).incrementAndGet();
            System.out.printf("\nrecord: %s\n", r);
        });

        AtomicBoolean hasIncomplete = new AtomicBoolean();
        topicMessageCount.forEach((t, c) -> {
            AtomicInteger actualCount = actualTopicMessageCount.computeIfAbsent(t, k -> new AtomicInteger());
            if (actualCount.get() < c)
                hasIncomplete.set(true);
        });

        if (!hasIncomplete.get())
            break;

        long timeElapsed = System.currentTimeMillis() - onStart;
        if (timeElapsed >= waitTimeout)
            Assert.fail("consume timeout, only consumed: " + actualTopicMessageCount + " messages, expected: "
                    + topicMessageCount);
    }
}
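A hypothetical call to this helper, assuming the consumer is already subscribed to both topics (topic names and counts are illustrative):

Map<String, Integer> expected = new HashMap<>();
expected.put("topic-a", 10);
expected.put("topic-b", 5);
// Wait up to 30 seconds overall, polling in 1 second slices
consumeMessages(consumer, expected, 30_000, 1_000);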
 
Example 8
Source File: KeycloakRefreshTokenWithJwtValidationTest.java    From strimzi-kafka-oauth with Apache License 2.0
@Test
public void doTest() throws Exception {
    System.out.println("==== KeycloakRefreshTokenWithJwtValidationTest ====");

    final String topic = "KeycloakRefreshTokenWithJwtValidationTest";
    final String tokenEndpointUri = "http://" + HOST + ":8080/auth/realms/" + REALM + "/protocol/openid-connect/token";

    String refreshToken = loginWithClientSecretForRefreshToken(URI.create(tokenEndpointUri), CLIENT_ID, CLIENT_SECRET);

    Properties defaults = new Properties();
    defaults.setProperty(ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, tokenEndpointUri);
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_ID, CLIENT_ID);
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_SECRET, CLIENT_SECRET);
    defaults.setProperty(ClientConfig.OAUTH_REFRESH_TOKEN, refreshToken);
    defaults.setProperty(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username");

    ConfigProperties.resolveAndExportToSystemProperties(defaults);

    Properties producerProps = buildProducerConfig();
    Producer<String, String> producer = new KafkaProducer<>(producerProps);

    producer.send(new ProducerRecord<>(topic, "The Message")).get();
    System.out.println("Produced The Message");

    Properties consumerProps = buildConsumerConfig();
    Consumer<String, String> consumer = new KafkaConsumer<>(consumerProps);

    TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(partition));

    // Busy-wait until partition metadata for the topic is available
    while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) {
        System.out.println("No assignment yet for consumer");
    }
    consumer.seekToBeginning(Arrays.asList(partition));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));

    Assert.assertEquals("Got message", 1, records.count());
    Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value());
}
 
Example 9
Source File: KeycloakClientCredentialsWithJwtValidationAuthzTest.java    From strimzi-kafka-oauth with Apache License 2.0
static void consume(Consumer<String, String> consumer, String topic) {
    TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(partition));

    while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) {
        System.out.println("No assignment yet for consumer");
    }

    consumer.seekToBeginning(Arrays.asList(partition));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(10));

    Assert.assertTrue("Got message", records.count() >= 1);
}
 
Example 10
Source File: ConsumerTest.java    From kbear with Apache License 2.0
protected void pollOut(Consumer<String, String> consumer) {
    // Drain any remaining records; after 60 seconds the last poll is expected to return nothing
    long beforePoll = System.currentTimeMillis();
    while (true) {
        ConsumerRecords<String, String> polled = consumer.poll(Duration.ofSeconds(1));
        System.out.println("polled count: " + polled.count());
        if (System.currentTimeMillis() - beforePoll > 60 * 1000) {
            Assert.assertEquals(0, polled.count());
            break;
        }
    }
}
 
Example 11
Source File: HydraOpaqueAccessTokenWithIntrospectValidationTest.java    From strimzi-kafka-oauth with Apache License 2.0
@Test
public void doTest() throws Exception {
    System.out.println("==== HydraOpaqueAccessTokenWithIntrospectValidationTest ====");

    final String topic = "HydraOpaqueAccessTokenWithIntrospectValidationTest";
    final String tokenEndpointUri = "https://" + HOST + ":4444/oauth2/token";

    Properties defaults = new Properties();
    defaults.setProperty(ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, tokenEndpointUri);
    defaults.setProperty(ClientConfig.OAUTH_ACCESS_TOKEN_IS_JWT, "false");

    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_LOCATION, "../docker/target/kafka/certs/ca-truststore.p12");
    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_PASSWORD, "changeit");
    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_TYPE, "pkcs12");

    ConfigProperties.resolveAndExportToSystemProperties(defaults);

    // Request access token using client id and secret, and truststore configuration
    TokenInfo info = OAuthAuthenticator.loginWithClientSecret(URI.create(tokenEndpointUri),
            ConfigUtil.createSSLFactory(new ClientConfig()),
            null, CLIENT_ID, CLIENT_SECRET, true, null, null);

    // Configure received token for Kafka client auth
    defaults.setProperty(ClientConfig.OAUTH_ACCESS_TOKEN, info.token());
    ConfigProperties.resolveAndExportToSystemProperties(defaults);


    Properties producerProps = buildProducerConfig();
    Producer<String, String> producer = new KafkaProducer<>(producerProps);

    producer.send(new ProducerRecord<>(topic, "The Message")).get();
    System.out.println("Produced The Message");

    Properties consumerProps = buildConsumerConfig();
    Consumer<String, String> consumer = new KafkaConsumer<>(consumerProps);

    TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(partition));

    while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) {
        System.out.println("No assignment yet for consumer");
    }
    consumer.seekToBeginning(Arrays.asList(partition));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));

    Assert.assertEquals("Got message", 1, records.count());
    Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value());
}
 
Example 12
Source File: HydraClientCredentialsWithJwtValidationTest.java    From strimzi-kafka-oauth with Apache License 2.0
@Test
public void doTest() throws Exception {
    System.out.println("==== HydraClientCredentialsWithJwtValidationTest ====");

    final String topic = "HydraClientCredentialsWithJwtValidationTest";
    final String tokenEndpointUri = "https://" + HOST + ":4444/oauth2/token";

    Properties defaults = new Properties();
    defaults.setProperty(ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, tokenEndpointUri);
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_ID, "kafka-producer-client");
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_SECRET, "kafka-producer-client-secret");

    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_LOCATION, "../docker/target/kafka/certs/ca-truststore.p12");
    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_PASSWORD, "changeit");
    defaults.setProperty(ClientConfig.OAUTH_SSL_TRUSTSTORE_TYPE, "pkcs12");

    ConfigProperties.resolveAndExportToSystemProperties(defaults);

    Properties producerProps = buildProducerConfig();
    Producer<String, String> producer = new KafkaProducer<>(producerProps);

    producer.send(new ProducerRecord<>(topic, "The Message")).get();
    System.out.println("Produced The Message");

    Properties consumerProps = buildConsumerConfig();
    Consumer<String, String> consumer = new KafkaConsumer<>(consumerProps);

    TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(partition));

    while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) {
        System.out.println("No assignment yet for consumer");
    }
    consumer.seekToBeginning(Arrays.asList(partition));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));

    Assert.assertEquals("Got message", 1, records.count());
    Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value());
}
 
Example 13
Source File: ExampleConsumer.java    From strimzi-kafka-oauth with Apache License 2.0
public static void main(String[] args) {

        String topic = "a_Topic1";

        Properties defaults = new Properties();
        Config external = new Config();

        //  Set KEYCLOAK_HOST to connect to Keycloak host other than 'keycloak'
        //  Use 'keycloak.host' system property or KEYCLOAK_HOST env variable

        final String keycloakHost = external.getValue("keycloak.host", "keycloak");
        final String realm = external.getValue("realm", "demo");
        final String tokenEndpointUri = "http://" + keycloakHost + ":8080/auth/realms/" + realm + "/protocol/openid-connect/token";

        //  You can also configure the token endpoint URI directly via the 'oauth.token.endpoint.uri'
        //  system property, or the OAUTH_TOKEN_ENDPOINT_URI env variable

        defaults.setProperty(ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, tokenEndpointUri);

        //  By default this client uses preconfigured clientId and secret to authenticate.
        //  You can set OAUTH_ACCESS_TOKEN or OAUTH_REFRESH_TOKEN to override default authentication.
        //
        //  If access token is configured, it is passed directly to Kafka broker
        //  If refresh token is configured, it is used in conjunction with clientId and secret
        //
        //  See examples README.md for more info.

        final String accessToken = external.getValue(ClientConfig.OAUTH_ACCESS_TOKEN, null);

        if (accessToken == null) {
            defaults.setProperty(Config.OAUTH_CLIENT_ID, "kafka-consumer-client");
            defaults.setProperty(Config.OAUTH_CLIENT_SECRET, "kafka-consumer-client-secret");
        }

        // Use 'preferred_username' rather than 'sub' for principal name
        if (isAccessTokenJwt(external)) {
            defaults.setProperty(Config.OAUTH_USERNAME_CLAIM, "preferred_username");
        }

        // Resolve external configurations falling back to provided defaults
        ConfigProperties.resolveAndExportToSystemProperties(defaults);

        Properties props = buildConsumerConfig();
        Consumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList(topic));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("Consumed message: " + record.value());
            }
        }
    }
 
Example 14
Source File: KeycloakClientCredentialsWithJwtValidationTest.java    From strimzi-kafka-oauth with Apache License 2.0
@Test
public void doTest() throws Exception {
    System.out.println("==== KeycloakClientCredentialsWithJwtValidationTest + test EC ====");

    Properties p = System.getProperties();
    for (Object key: p.keySet()) {
        System.out.println("" + key + "=" + p.get(key));
    }

    final String topic = "KeycloakClientCredentialsWithJwtValidationTest";
    final String tokenEndpointUri = "http://" + HOST + ":8080/auth/realms/" + REALM + "/protocol/openid-connect/token";

    Properties defaults = new Properties();
    defaults.setProperty(ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, tokenEndpointUri);
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_ID, "kafka-producer-client");
    defaults.setProperty(ClientConfig.OAUTH_CLIENT_SECRET, "kafka-producer-client-secret");
    defaults.setProperty(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username");

    ConfigProperties.resolveAndExportToSystemProperties(defaults);

    Properties producerProps = buildProducerConfig();
    Producer<String, String> producer = new KafkaProducer<>(producerProps);

    producer.send(new ProducerRecord<>(topic, "The Message")).get();
    System.out.println("Produced The Message");

    Properties consumerProps = buildConsumerConfig();
    Consumer<String, String> consumer = new KafkaConsumer<>(consumerProps);

    TopicPartition partition = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(partition));

    while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) {
        System.out.println("No assignment yet for consumer");
    }
    consumer.seekToBeginning(Arrays.asList(partition));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));

    Assert.assertEquals("Got message", 1, records.count());
    Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value());
}
 
Example 15
Source File: MessageClient.java    From alcor with Apache License 2.0
public List<?> runConsumer(String topic, boolean keepRunning) {
    Logger logger = LoggerFactory.getLogger();

    if (this.messageConsumerFactory == null) {
        logger.log(Level.INFO, "No message consumer factory is specified");
        return null;
    }

    List<Object> recordsValue = new ArrayList<>();
    Consumer<Long, ?> consumer = this.messageConsumerFactory.Create();
    consumer.subscribe(Collections.singletonList(topic));

    int noMessageFound = 0;
    while (keepRunning) {
        // 1000 milliseconds is the time consumer will wait if no record is found at broker.
        ConsumerRecords<Long, ?> consumerRecords = consumer.poll(1000);

        if (consumerRecords.count() == 0) {
            noMessageFound++;
            logger.log(Level.INFO, "No message found :" + noMessageFound);

            if (noMessageFound > IKafkaConfiguration.MAX_NO_MESSAGE_FOUND_COUNT)
                // Exit the loop once the no-message count reaches the threshold.
                break;
            else
                continue;
        }

        // Print each record.
        consumerRecords.forEach(record -> {
            logger.log(Level.INFO, "Record Key " + record.key());
            logger.log(Level.INFO, "Record value " + record.value());
            logger.log(Level.INFO, "Record partition " + record.partition());
            logger.log(Level.INFO, "Record offset " + record.offset());

            recordsValue.add(record.value());
        });
        // Commit the offsets of the polled records to the broker.
        consumer.commitAsync();
    }

    consumer.close();
    return recordsValue;
}
 
Example 16
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0
@Test
public void testProducerConsumerJsonSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerJsonSchemaWithPulsarKafkaClient";

    JSONSchema<Bar> barSchema = JSONSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    JSONSchema<Foo> fooSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
 
Example 17
Source File: KafkaReceiver.java    From zerocode with Apache License 2.0
public String receive(String kafkaServers, String topicName, String requestJsonWithConfig) throws IOException {

        ConsumerLocalConfigs consumerLocalConfigs = readConsumerLocalTestProperties(requestJsonWithConfig);

        ConsumerLocalConfigs effectiveLocal = deriveEffectiveConfigs(consumerLocalConfigs, consumerCommonConfigs);

        LOGGER.info("\n### Kafka Consumer Effective configs:{}\n", effectiveLocal);

        Consumer consumer = createConsumer(kafkaServers, consumerPropertyFile, topicName);

        final List<ConsumerRecord> rawRecords = new ArrayList<>();
        final List<ConsumerJsonRecord> jsonRecords = new ArrayList<>();

        int noOfTimeOuts = 0;

        handleSeekOffset(effectiveLocal, consumer);

        while (true) {
            LOGGER.info("polling records  - noOfTimeOuts reached : " + noOfTimeOuts);

            final ConsumerRecords records = consumer.poll(ofMillis(getPollTime(effectiveLocal)));

            if (records.count() == 0) {
                noOfTimeOuts++;
                if (noOfTimeOuts > getMaxTimeOuts(effectiveLocal)) {
                    break;
                } else {
                    continue;
                }
            } else {
                LOGGER.info("Got {} records after {} timeouts\n", records.count(), noOfTimeOuts);
                // -----------------------------------
                // reset after it fetched some records
                // -----------------------------------
                noOfTimeOuts = 0;
            }

            // poll() never returns null, so the records can be processed directly
            Iterator recordIterator = records.iterator();

            LOGGER.info("Consumer chosen recordType: " + effectiveLocal.getRecordType());

            switch (effectiveLocal.getRecordType()) {
                case RAW:
                    readRaw(rawRecords, recordIterator);
                    break;

                case JSON:
                    readJson(jsonRecords, recordIterator);
                    break;

                default:
                    throw new RuntimeException("Unsupported record type - '" + effectiveLocal.getRecordType()
                            + "'. Supported values are 'JSON','RAW'");
            }

            handleCommitSyncAsync(consumer, consumerCommonConfigs, effectiveLocal);
        }

        consumer.close();

        handleRecordsDump(effectiveLocal, rawRecords, jsonRecords);

        return prepareResult(effectiveLocal, jsonRecords, rawRecords);

    }
 
Example 18
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
@Test
public void testProducerConsumerAvroSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerAvroSchemaWithPulsarKafkaClient";

    AvroSchema<Bar> barSchema = AvroSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    AvroSchema<Foo> fooSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
 
Example 19
Source File: CruiseControlMetricsReporterTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testReportingMetrics() throws ExecutionException, InterruptedException {
  Properties props = new Properties();
  props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
  props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MetricSerde.class.getName());
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testReportingMetrics");
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  setSecurityConfigs(props, "consumer");
  Consumer<String, CruiseControlMetric> consumer = new KafkaConsumer<>(props);

  consumer.subscribe(Collections.singleton(TOPIC));
  long startMs = System.currentTimeMillis();
  HashSet<Integer> expectedMetricTypes = new HashSet<>(Arrays.asList((int) ALL_TOPIC_BYTES_IN.id(),
                                                                     (int) ALL_TOPIC_BYTES_OUT.id(),
                                                                     (int) TOPIC_BYTES_IN.id(),
                                                                     (int) TOPIC_BYTES_OUT.id(),
                                                                     (int) PARTITION_SIZE.id(),
                                                                     (int) BROKER_CPU_UTIL.id(),
                                                                     (int) ALL_TOPIC_REPLICATION_BYTES_IN.id(),
                                                                     (int) ALL_TOPIC_REPLICATION_BYTES_OUT.id(),
                                                                     (int) ALL_TOPIC_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) ALL_TOPIC_FETCH_REQUEST_RATE.id(),
                                                                     (int) ALL_TOPIC_MESSAGES_IN_PER_SEC.id(),
                                                                     (int) TOPIC_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) TOPIC_FETCH_REQUEST_RATE.id(),
                                                                     (int) TOPIC_MESSAGES_IN_PER_SEC.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_RATE.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_RATE.id(),
                                                                     (int) BROKER_REQUEST_HANDLER_AVG_IDLE_PERCENT.id(),
                                                                     (int) BROKER_REQUEST_QUEUE_SIZE.id(),
                                                                     (int) BROKER_RESPONSE_QUEUE_SIZE.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_LOG_FLUSH_RATE.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_MAX.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_50TH.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_999TH.id()));
  Set<Integer> metricTypes = new HashSet<>();
  ConsumerRecords<String, CruiseControlMetric> records;
  while (metricTypes.size() < expectedMetricTypes.size() && System.currentTimeMillis() < startMs + 15000) {
    records = consumer.poll(Duration.ofMillis(10L));
    for (ConsumerRecord<String, CruiseControlMetric> record : records) {
      metricTypes.add((int) record.value().rawMetricType().id());
    }
  }
  assertEquals("Expected " + expectedMetricTypes + ", but saw " + metricTypes, expectedMetricTypes, metricTypes);
}
 
Example 20
Source File: KafkaClientMetricsIntegrationTest.java    From micrometer with Apache License 2.0
@Test
void shouldManageProducerAndConsumerMetrics() {
    SimpleMeterRegistry registry = new SimpleMeterRegistry();

    assertThat(registry.getMeters()).hasSize(0);

    Properties producerConfigs = new Properties();
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            kafkaContainer.getBootstrapServers());
    Producer<String, String> producer = new KafkaProducer<>(
            producerConfigs, new StringSerializer(), new StringSerializer());

    KafkaClientMetrics producerKafkaMetrics = new KafkaClientMetrics(producer);
    producerKafkaMetrics.bindTo(registry);

    int producerMetrics = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(0);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    Properties consumerConfigs = new Properties();
    consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
            kafkaContainer.getBootstrapServers());
    consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    Consumer<String, String> consumer = new KafkaConsumer<>(
            consumerConfigs, new StringDeserializer(), new StringDeserializer());

    KafkaClientMetrics consumerKafkaMetrics = new KafkaClientMetrics(consumer);
    consumerKafkaMetrics.bindTo(registry);

    //Printing out for discovery purposes
    out.println("Meters from producer before sending:");
    printMeters(registry);

    int producerAndConsumerMetrics = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(producerMetrics);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    String topic = "test";
    producer.send(new ProducerRecord<>(topic, "key", "value"));
    producer.flush();

    //Printing out for discovery purposes
    out.println("Meters from producer after sending and consumer before poll:");
    printMeters(registry);

    producerKafkaMetrics.checkAndBindMetrics(registry);

    int producerAndConsumerMetricsAfterSend = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(producerAndConsumerMetrics);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    consumer.subscribe(Collections.singletonList(topic));

    consumer.poll(Duration.ofMillis(100));

    //Printing out for discovery purposes
    out.println("Meters from producer and consumer after polling:");
    printMeters(registry);

    consumerKafkaMetrics.checkAndBindMetrics(registry);

    assertThat(registry.getMeters()).hasSizeGreaterThan(producerAndConsumerMetricsAfterSend);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    //Printing out for discovery purposes
    out.println("All meters from producer and consumer:");
    printMeters(registry);

    producerKafkaMetrics.close();
    consumerKafkaMetrics.close();
}