Java Code Examples for org.apache.kafka.clients.consumer.Consumer#subscribe()

The following examples show how to use org.apache.kafka.clients.consumer.Consumer#subscribe(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
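
As a quick orientation before the project examples, here is a minimal sketch of the canonical pattern they all build on: configure a KafkaConsumer, subscribe() to a collection of topics, and poll in a loop. The broker address, group id, and topic name below are placeholders, not values taken from any of the examples.

// Minimal subscribe/poll loop (sketch; broker, group id, and topic are placeholders).
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("example-topic"));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset=%d key=%s value=%s%n",
                    record.offset(), record.key(), record.value());
        }
    }
}
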
Example 1
Source File: TestUtils.java    From uReplicator with Apache License 2.0
public static List<ConsumerRecord<Byte[], Byte[]>> consumeMessage(String bootstrapServer,
    String topicName,
    int timeoutMs
) throws InterruptedException {

  long time = new Date().getTime();
  Consumer<Byte[], Byte[]> consumer = createConsumer(bootstrapServer);
  consumer.subscribe(Collections.singletonList(topicName));

  List<ConsumerRecord<Byte[], Byte[]>> result = new ArrayList<>();
  while ((new Date().getTime()) - time < timeoutMs) {
    ConsumerRecords<Byte[], Byte[]> records = consumer.poll(1000);
    Iterator<ConsumerRecord<Byte[], Byte[]>> iterator = records.iterator();
    while (iterator.hasNext()) {
      result.add(iterator.next());
    }
    Thread.sleep(300);
  }
  consumer.close();
  return result;
}
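
Several of these examples call the poll(long) overload (consumer.poll(1000) above), which has been deprecated since Kafka 2.0 because it can block indefinitely while fetching metadata, regardless of the timeout. The poll(Duration) overload bounds the whole call. The equivalent call here would be:

// Equivalent call with the non-deprecated overload (Kafka 2.0+):
ConsumerRecords<Byte[], Byte[]> records = consumer.poll(Duration.ofMillis(1000));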
 
Example 2
Source File: ConsumerPool.java    From localization_nifi with Apache License 2.0
/**
 * Obtains a consumer from the pool if one is available or lazily
 * initializes a new one if deemed necessary.
 *
 * @param session the session for which the consumer lease will be
 * associated
 * @return consumer to use or null if not available or necessary
 */
public ConsumerLease obtainConsumer(final ProcessSession session) {
    SimpleConsumerLease lease = pooledLeases.poll();
    if (lease == null) {
        final Consumer<byte[], byte[]> consumer = createKafkaConsumer();
        consumerCreatedCountRef.incrementAndGet();
        /**
         * For now return a new consumer lease. But we could later elect to
         * have this return null if we determine the broker indicates that
         * the lag time on all topics being monitored is sufficiently low.
         * For now we should encourage conservative use of threads because
         * having too many means we'll have at best useless threads sitting
         * around doing frequent network calls and at worst having consumers
         * sitting idle which could prompt excessive rebalances.
         */
        lease = new SimpleConsumerLease(consumer);
        /**
         * This subscription tightly couples the lease to the given
         * consumer. They cannot be separated from then on.
         */
        consumer.subscribe(topics, lease);
    }
    lease.setProcessSession(session);
    leasesObtainedCountRef.incrementAndGet();
    return lease;
}
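
The two-argument subscribe(Collection, ConsumerRebalanceListener) call above works because the lease doubles as a ConsumerRebalanceListener, so the consumer notifies it whenever partitions are assigned or revoked. A minimal sketch of such a listener (the bodies are illustrative, not NiFi's actual logic):

// Sketch of the subscribe overload with a rebalance listener.
consumer.subscribe(topics, new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Commit or flush in-flight work before losing ownership of these partitions.
        System.out.println("Revoked: " + partitions);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        System.out.println("Assigned: " + partitions);
    }
});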
 
Example 3
Source File: KafkaNativeSerializationApplicationTests.java    From spring-cloud-stream-samples with Apache License 2.0
@Test
public void testSendReceive() {
	Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
	senderProps.put("value.serializer", StringSerializer.class);
	DefaultKafkaProducerFactory<byte[], String> pf = new DefaultKafkaProducerFactory<>(senderProps);
	KafkaTemplate<byte[], String> template = new KafkaTemplate<>(pf, true);
	template.setDefaultTopic(INPUT_TOPIC);
	template.sendDefault("foo");

	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps(GROUP_NAME, "false", embeddedKafka.getEmbeddedKafka());
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	consumerProps.put("value.deserializer", MyJsonDeserializer.class);
	DefaultKafkaConsumerFactory<byte[], Person> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

	Consumer<byte[], Person> consumer = cf.createConsumer();
	consumer.subscribe(Collections.singleton(OUTPUT_TOPIC));
	ConsumerRecords<byte[], Person> records = consumer.poll(Duration.ofSeconds(10));
	consumer.commitSync();

	assertThat(records.count()).isEqualTo(1);
	assertThat(records.iterator().next().value().getName()).isEqualTo("foo");
}
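
Since this test already depends on spring-kafka-test, the manual subscribe-and-poll pair could arguably be replaced with the KafkaTestUtils helpers, which poll until a record arrives or a timeout elapses. A sketch, assuming a current spring-kafka-test API:

// Alternative using spring-kafka-test helpers (sketch):
embeddedKafka.getEmbeddedKafka().consumeFromAnEmbeddedTopic(consumer, OUTPUT_TOPIC);
ConsumerRecord<byte[], Person> record = KafkaTestUtils.getSingleRecord(consumer, OUTPUT_TOPIC);
assertThat(record.value().getName()).isEqualTo("foo");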
 
Example 4
Source File: ThreadedConsumerExample.java    From kafka-streams-in-action with Apache License 2.0
private Runnable getConsumerThread(Properties properties) {
    return () -> {
        Consumer<String, String> consumer = null;
        try {
            consumer = new KafkaConsumer<>(properties);
            consumer.subscribe(Collections.singletonList("test-topic"));
            while (!doneConsuming) {
                ConsumerRecords<String, String> records = consumer.poll(5000);
                for (ConsumerRecord<String, String> record : records) {
                    String message = String.format("Consumed: key = %s value = %s with offset = %d partition = %d",
                            record.key(), record.value(), record.offset(), record.partition());
                    System.out.println(message);
                }

            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null) {
                consumer.close();
            }
        }
    };
}
 
Example 5
Source File: SubscribableKafkaMessageSource.java    From extension-kafka with Apache License 2.0
/**
 * Start polling the {@code topics} configured through {@link Builder#topics(List)}/{@link Builder#addTopic(String)}
 * with a {@link Consumer} built by the {@link ConsumerFactory}.
 * <p>
 * This operation should be called <b>only</b> if all desired Event Processors have been subscribed (through the
 * {@link #subscribe(java.util.function.Consumer)} method).
 */
public void start() {
    if (inProgress.getAndSet(true)) {
        return;
    }

    for (int consumerIndex = 0; consumerIndex < consumerCount; consumerIndex++) {
        Consumer<K, V> consumer = consumerFactory.createConsumer(groupId);
        consumer.subscribe(topics);

        Registration closeConsumer = fetcher.poll(
                consumer,
                consumerRecords -> StreamSupport.stream(consumerRecords.spliterator(), false)
                                                .map(messageConverter::readKafkaMessage)
                                                .filter(Optional::isPresent)
                                                .map(Optional::get)
                                                .collect(Collectors.toList()),
                eventMessages -> eventProcessors.forEach(eventProcessor -> eventProcessor.accept(eventMessages))
        );
        fetcherRegistrations.add(closeConsumer);
    }
}
 
Example 6
Source File: ConsumerPool.java    From nifi with Apache License 2.0
/**
 * Obtains a consumer from the pool if one is available or lazily
 * initializes a new one if deemed necessary.
 *
 * @param session the session for which the consumer lease will be
 *            associated
 * @param processContext the ProcessContext for which the consumer
 *            lease will be associated
 * @return consumer to use or null if not available or necessary
 */
public ConsumerLease obtainConsumer(final ProcessSession session, final ProcessContext processContext) {
    SimpleConsumerLease lease = pooledLeases.poll();
    if (lease == null) {
        final Consumer<byte[], byte[]> consumer = createKafkaConsumer();
        consumerCreatedCountRef.incrementAndGet();
        /**
         * For now return a new consumer lease. But we could later elect to
         * have this return null if we determine the broker indicates that
         * the lag time on all topics being monitored is sufficiently low.
         * For now we should encourage conservative use of threads because
         * having too many means we'll have at best useless threads sitting
         * around doing frequent network calls and at worst having consumers
         * sitting idle which could prompt excessive rebalances.
         */
        lease = new SimpleConsumerLease(consumer);
        /**
         * This subscription tightly couples the lease to the given
         * consumer. They cannot be separated from then on.
         */
        if (topics != null) {
          consumer.subscribe(topics, lease);
        } else {
          consumer.subscribe(topicPattern, lease);
        }
    }
    lease.setProcessSession(session, processContext);

    leasesObtainedCountRef.incrementAndGet();
    return lease;
}
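
Unlike the earlier variant, this pool also supports pattern subscription: subscribe(Pattern, ConsumerRebalanceListener) matches the regular expression against all topics visible to the consumer, and the match is re-evaluated on metadata refresh, so newly created matching topics are picked up automatically. A minimal sketch with a placeholder pattern:

// Subscribe to every topic whose name starts with "events-" (placeholder pattern).
Pattern topicPattern = Pattern.compile("events-.*");
consumer.subscribe(topicPattern, new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) { }
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) { }
});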
 
Example 7
Source File: ConsumerAvroExample.java    From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test-avro";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    AvroSchema<Bar> barSchema = AvroSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    AvroSchema<Foo> fooSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Bar bar = new Bar();
    bar.setField1(true);

    Foo foo = new Foo();
    foo.setField1("field1");
    foo.setField2("field2");
    foo.setField3(3);

    @SuppressWarnings("resource")
    Consumer<Foo, Bar> consumer = new KafkaConsumer<>(props, fooSchema, barSchema);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Foo, Bar> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
 
Example 8
Source File: KafkaProducer09IT.java    From datacollector with Apache License 2.0
private void verify(
    final String topic,
    final int numMessages,
    final String metadataBrokerList,
    String message
) {
  Properties props = new Properties();
  props.put("bootstrap.servers", metadataBrokerList);
  props.put("group.id", "test");
  props.put("enable.auto.commit", "false");
  props.put("auto.commit.interval.ms", "1000");
  props.put("auto.offset.reset", "earliest");
  props.put("session.timeout.ms", "30000");
  props.put(KafkaConstants.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  props.put(KafkaConstants.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  Consumer<String, String> consumer = new KafkaConsumer<>(props);
  consumer.subscribe(Collections.singletonList(topic));
  List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
  while (buffer.size() < 1) {
    ConsumerRecords<String, String> records = consumer.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
      buffer.add(record);
    }
  }
  Assert.assertEquals(numMessages, buffer.size());
  Assert.assertEquals(message, buffer.get(0).value());
}
 
Example 9
Source File: ConsumerTest.java    From kbear with Apache License 2.0
protected void close(java.util.function.Consumer<Consumer<String, String>> closer) throws InterruptedException {
    produceMessages();

    Consumer<String, String> consumer = createConsumer();
    try {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);
    } finally {
        closer.accept(consumer);
    }
}
 
Example 10
Source File: KafkaStreamTable.java    From calcite with Apache License 2.0
@Override public Enumerable<Object[]> scan(final DataContext root) {
  final AtomicBoolean cancelFlag = DataContext.Variable.CANCEL_FLAG.get(root);
  return new AbstractEnumerable<Object[]>() {
    public Enumerator<Object[]> enumerator() {
      if (tableOptions.getConsumer() != null) {
        return new KafkaMessageEnumerator(tableOptions.getConsumer(),
            tableOptions.getRowConverter(), cancelFlag);
      }

      Properties consumerConfig = new Properties();
      consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
          tableOptions.getBootstrapServers());
      //by default it's <byte[], byte[]>
      consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
          "org.apache.kafka.common.serialization.ByteArrayDeserializer");
      consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
          "org.apache.kafka.common.serialization.ByteArrayDeserializer");

      if (tableOptions.getConsumerParams() != null) {
        consumerConfig.putAll(tableOptions.getConsumerParams());
      }
      Consumer consumer = new KafkaConsumer<>(consumerConfig);
      consumer.subscribe(Collections.singletonList(tableOptions.getTopicName()));

      return new KafkaMessageEnumerator(consumer, tableOptions.getRowConverter(), cancelFlag);
    }
  };
}
 
Example 11
Source File: ConsumerPool.java    From nifi with Apache License 2.0
/**
 * Obtains a consumer from the pool if one is available or lazily
 * initializes a new one if deemed necessary.
 *
 * @param session the session for which the consumer lease will be
 * associated
 * @return consumer to use or null if not available or necessary
 */
public ConsumerLease obtainConsumer(final ProcessSession session) {
    SimpleConsumerLease lease = pooledLeases.poll();
    if (lease == null) {
        final Consumer<byte[], byte[]> consumer = createKafkaConsumer();
        consumerCreatedCountRef.incrementAndGet();
        /**
         * For now return a new consumer lease. But we could later elect to
         * have this return null if we determine the broker indicates that
         * the lag time on all topics being monitored is sufficiently low.
         * For now we should encourage conservative use of threads because
         * having too many means we'll have at best useless threads sitting
         * around doing frequent network calls and at worst having consumers
         * sitting idle which could prompt excessive rebalances.
         */
        lease = new SimpleConsumerLease(consumer);

        /**
         * This subscription tightly couples the lease to the given
         * consumer. They cannot be separated from then on.
         */
        consumer.subscribe(topics, lease);
    }
    lease.setProcessSession(session);
    leasesObtainedCountRef.incrementAndGet();
    return lease;
}
 
Example 12
Source File: KafkaApiTest.java    From pulsar with Apache License 2.0
@Test
public void testProducerConsumerAvroSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerAvroSchemaWithPulsarKafkaClient";

    AvroSchema<Bar> barSchema = AvroSchema.of(SchemaDefinition.<Bar>builder().withPojo(Bar.class).build());
    AvroSchema<Foo> fooSchema = AvroSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<Bar, Foo> consumer = new KafkaConsumer<>(props, barSchema, fooSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<Bar, Foo> producer = new KafkaProducer<>(props, barSchema, fooSchema);

    for (int i = 0; i < 10; i++) {
        Bar bar = new Bar();
        bar.setField1(true);

        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, bar, foo));
    }
    producer.flush();
    producer.close();

    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<Bar, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                Bar key = record.key();
                Assert.assertTrue(key.isField1());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });

            consumer.commitSync();
        }
    }
}
 
Example 13
Source File: AsyncFetcherTest.java    From extension-kafka with Apache License 2.0
/**
 * This test extends beyond the {@link AsyncFetcher} itself, verifying that the {@link FetchEventsTask} it creates
 * also consumes records from an integrated Kafka setup. In doing so, the test case closely mirrors what the
 * {@link StreamableKafkaMessageSource} implementation does when calling the AsyncFetcher, for example by creating a
 * {@link Consumer} and tying it to a group and topic.
 */
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
void testStartFetcherWithExistingTokenShouldStartAtSpecificPositions() throws InterruptedException {
    int expectedNumberOfMessages = 26;
    CountDownLatch messageCounter = new CountDownLatch(expectedNumberOfMessages);

    String testTopic = "testStartFetcherWith_ExistingToken_ShouldStartAtSpecificPositions";
    int p0 = 0;
    int p1 = 1;
    int p2 = 2;
    int p3 = 3;
    int p4 = 4;
    ProducerFactory<String, String> producerFactory = publishRecords(testTopic, p0, p1, p2, p3, p4);
    SortedKafkaMessageBuffer<KafkaEventMessage> testBuffer =
            new LatchedSortedKafkaMessageBuffer<>(expectedNumberOfMessages, messageCounter);

    Map<TopicPartition, Long> testPositions = new HashMap<>();
    testPositions.put(new TopicPartition(testTopic, 0), 5L);
    testPositions.put(new TopicPartition(testTopic, 1), 1L);
    testPositions.put(new TopicPartition(testTopic, 2), 9L);
    testPositions.put(new TopicPartition(testTopic, 3), 4L);
    testPositions.put(new TopicPartition(testTopic, 4), 0L);
    KafkaTrackingToken testStartToken = KafkaTrackingToken.newInstance(testPositions);

    Consumer<String, String> testConsumer = consumerFactory(kafkaBroker).createConsumer(DEFAULT_GROUP_ID);
    testConsumer.subscribe(
            Collections.singletonList(testTopic),
            new TrackingTokenConsumerRebalanceListener<>(testConsumer, () -> testStartToken)
    );

    testSubject.poll(
            testConsumer,
            new TrackingRecordConverter<>(new ConsumerRecordConverter(), testStartToken),
            testBuffer::putAll
    );

    messageCounter.await();
    assertThat(testBuffer.size()).isEqualTo(expectedNumberOfMessages);
    assertMessagesCountPerPartition(expectedNumberOfMessages, p0, p1, p2, p3, p4, testBuffer);

    producerFactory.shutDown();
}
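
The rebalance listener passed to subscribe() here is what makes "start at specific positions" work: subscribe() itself accepts no offsets, so the listener must seek once partitions are actually assigned. A sketch of that idea, assuming a startOffsets map shaped like testPositions above:

// Sketch: seek to stored offsets once the group actually assigns partitions.
Map<TopicPartition, Long> startOffsets = testPositions; // assumed to be in scope
consumer.subscribe(Collections.singletonList(testTopic), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) { }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            Long offset = startOffsets.get(partition);
            if (offset != null) {
                consumer.seek(partition, offset); // resume exactly where the token left off
            }
        }
    }
});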
 
Example 14
Source File: CruiseControlMetricsReporterTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testReportingMetrics() throws ExecutionException, InterruptedException {
  Properties props = new Properties();
  props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
  props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MetricSerde.class.getName());
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testReportingMetrics");
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  setSecurityConfigs(props, "consumer");
  Consumer<String, CruiseControlMetric> consumer = new KafkaConsumer<>(props);

  consumer.subscribe(Collections.singleton(TOPIC));
  long startMs = System.currentTimeMillis();
  HashSet<Integer> expectedMetricTypes = new HashSet<>(Arrays.asList((int) ALL_TOPIC_BYTES_IN.id(),
                                                                     (int) ALL_TOPIC_BYTES_OUT.id(),
                                                                     (int) TOPIC_BYTES_IN.id(),
                                                                     (int) TOPIC_BYTES_OUT.id(),
                                                                     (int) PARTITION_SIZE.id(),
                                                                     (int) BROKER_CPU_UTIL.id(),
                                                                     (int) ALL_TOPIC_REPLICATION_BYTES_IN.id(),
                                                                     (int) ALL_TOPIC_REPLICATION_BYTES_OUT.id(),
                                                                     (int) ALL_TOPIC_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) ALL_TOPIC_FETCH_REQUEST_RATE.id(),
                                                                     (int) ALL_TOPIC_MESSAGES_IN_PER_SEC.id(),
                                                                     (int) TOPIC_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) TOPIC_FETCH_REQUEST_RATE.id(),
                                                                     (int) TOPIC_MESSAGES_IN_PER_SEC.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_RATE.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_RATE.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_RATE.id(),
                                                                     (int) BROKER_REQUEST_HANDLER_AVG_IDLE_PERCENT.id(),
                                                                     (int) BROKER_REQUEST_QUEUE_SIZE.id(),
                                                                     (int) BROKER_RESPONSE_QUEUE_SIZE.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_MAX.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_LOG_FLUSH_RATE.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_MAX.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_MEAN.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_REQUEST_QUEUE_TIME_MS_999TH.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_TOTAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_PRODUCE_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_CONSUMER_FETCH_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_50TH.id(),
                                                                     (int) BROKER_FOLLOWER_FETCH_LOCAL_TIME_MS_999TH.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_50TH.id(),
                                                                     (int) BROKER_LOG_FLUSH_TIME_MS_999TH.id()));
  Set<Integer> metricTypes = new HashSet<>();
  ConsumerRecords<String, CruiseControlMetric> records;
  while (metricTypes.size() < expectedMetricTypes.size() && System.currentTimeMillis() < startMs + 15000) {
    records = consumer.poll(Duration.ofMillis(10L));
    for (ConsumerRecord<String, CruiseControlMetric> record : records) {
      metricTypes.add((int) record.value().rawMetricType().id());
    }
  }
  assertEquals("Expected " + expectedMetricTypes + ", but saw " + metricTypes, expectedMetricTypes, metricTypes);
}
 
Example 15
Source File: KafkaClientMetricsIntegrationTest.java    From micrometer with Apache License 2.0
@Test
void shouldManageProducerAndConsumerMetrics() {
    SimpleMeterRegistry registry = new SimpleMeterRegistry();

    assertThat(registry.getMeters()).hasSize(0);

    Properties producerConfigs = new Properties();
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            kafkaContainer.getBootstrapServers());
    Producer<String, String> producer = new KafkaProducer<>(
            producerConfigs, new StringSerializer(), new StringSerializer());

    KafkaClientMetrics producerKafkaMetrics = new KafkaClientMetrics(producer);
    producerKafkaMetrics.bindTo(registry);

    int producerMetrics = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(0);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    Properties consumerConfigs = new Properties();
    consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
            kafkaContainer.getBootstrapServers());
    consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    Consumer<String, String> consumer = new KafkaConsumer<>(
            consumerConfigs, new StringDeserializer(), new StringDeserializer());

    KafkaClientMetrics consumerKafkaMetrics = new KafkaClientMetrics(consumer);
    consumerKafkaMetrics.bindTo(registry);

    //Printing out for discovery purposes
    out.println("Meters from producer before sending:");
    printMeters(registry);

    int producerAndConsumerMetrics = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(producerMetrics);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    String topic = "test";
    producer.send(new ProducerRecord<>(topic, "key", "value"));
    producer.flush();

    //Printing out for discovery purposes
    out.println("Meters from producer after sending and consumer before poll:");
    printMeters(registry);

    producerKafkaMetrics.checkAndBindMetrics(registry);

    int producerAndConsumerMetricsAfterSend = registry.getMeters().size();
    assertThat(registry.getMeters()).hasSizeGreaterThan(producerAndConsumerMetrics);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    consumer.subscribe(Collections.singletonList(topic));

    consumer.poll(Duration.ofMillis(100));

    //Printing out for discovery purposes
    out.println("Meters from producer and consumer after polling:");
    printMeters(registry);

    consumerKafkaMetrics.checkAndBindMetrics(registry);

    assertThat(registry.getMeters()).hasSizeGreaterThan(producerAndConsumerMetricsAfterSend);
    assertThat(registry.getMeters())
            .extracting(m -> m.getId().getTag("kafka-version"))
            .allMatch(v -> !v.isEmpty());

    //Printing out for discovery purposes
    out.println("All meters from producer and consumer:");
    printMeters(registry);

    producerKafkaMetrics.close();
    consumerKafkaMetrics.close();
}
 
Example 16
Source File: MessageClient.java    From alcor with Apache License 2.0
public List<?> runConsumer(String topic, boolean keepRunning) {
    Logger logger = LoggerFactory.getLogger();

    if (this.messageConsumerFactory == null) {
        logger.log(Level.INFO, "No message consumer factory is specified");
        return null;
    }

    List recordsValue = new ArrayList();
    Consumer consumer = this.messageConsumerFactory.Create();
    consumer.subscribe(Collections.singletonList(topic));

    int noMessageFound = 0;
    while (keepRunning) {
        // 1000 milliseconds is the maximum time the consumer will wait if no records are available at the broker.
        ConsumerRecords<Long, ?> consumerRecords = consumer.poll(1000);

        if (consumerRecords.count() == 0) {
            noMessageFound++;
            logger.log(Level.INFO, "No message found :" + noMessageFound);

            if (noMessageFound > IKafkaConfiguration.MAX_NO_MESSAGE_FOUND_COUNT)
                // Exit the loop once the no-message count reaches the threshold.
                break;
            else
                continue;
        }

        // Print each record.
        consumerRecords.forEach(record -> {
            logger.log(Level.INFO, "Record Key " + record.key());
            logger.log(Level.INFO, "Record value " + record.value());
            logger.log(Level.INFO, "Record partition " + record.partition());
            logger.log(Level.INFO, "Record offset " + record.offset());

            recordsValue.add(record.value());
        });
        // Commit the offsets of the consumed records back to the broker.
        consumer.commitAsync();
    }

    consumer.close();
    return recordsValue;
}
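
The no-argument commitAsync() used above keeps the poll loop from blocking, but it also drops commit failures silently; the overload taking an OffsetCommitCallback at least surfaces them. A minimal sketch in the style of this example:

// Sketch: commitAsync with a callback so commit failures are at least logged.
consumer.commitAsync((offsets, exception) -> {
    if (exception != null) {
        logger.log(Level.WARNING, "Offset commit failed for " + offsets, exception);
    }
});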
 