Java Code Examples for org.apache.kafka.clients.consumer.Consumer#close()

The following examples show how to use org.apache.kafka.clients.consumer.Consumer#close(). They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
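Before the project examples, here is a minimal sketch of the usual close pattern; the method name, broker address, group id, and topic are placeholders for illustration, not taken from any project below. Because Consumer extends Closeable, a try-with-resources block guarantees close() runs (committing offsets when auto-commit is enabled and leaving the consumer group) even if poll() throws; newer clients also offer close(Duration) when an explicit shutdown timeout is needed.

public static void pollOnceAndClose() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
    props.put("group.id", "example-group");              // placeholder consumer group
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    // try-with-resources: consumer.close() is invoked automatically, even if poll() throws
    try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Collections.singletonList("example-topic"));  // placeholder topic
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        records.forEach(record -> System.out.println(record.key() + " = " + record.value()));
    }
}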
Example 1
Source File: OffsetSource.java    From kafka-backup with Apache License 2.0
public void syncGroupForOffset(TopicPartition topicPartition, long sourceOffset, long targetOffset) {
    OffsetStoreFile offsetStoreFile = topicOffsets.get(topicPartition);
    // __consumer_offsets contains the offset of the message to read next, so we need to search for sourceOffset + 1.
    // If we did not do that, we might miss groups that have consumed exactly up to this message.
    List<String> groups = offsetStoreFile.groupForOffset(sourceOffset + 1);
    if (groups != null && groups.size() > 0) {
        for (String group : groups) {
            Map<String, Object> groupConsumerConfig = new HashMap<>(consumerConfig);
            groupConsumerConfig.put("group.id", group);
            Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(groupConsumerConfig);
            consumer.assign(Collections.singletonList(topicPartition));
            // ! Target Offset + 1 as we commit the offset of the "next message to read"
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(targetOffset + 1);
            Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(topicPartition, offsetAndMetadata);
            consumer.commitSync(offsets);
            consumer.close();
            log.debug("Committed target offset {} for group {} for topic {} partition {}",
                    (targetOffset + 1), group, topicPartition.topic(), topicPartition.partition());
        }
    }
}
 
Example 2
Source File: ThreadedConsumerExample.java    From kafka-streams-in-action with Apache License 2.0
private Runnable getConsumerThread(Properties properties) {
    return () -> {
        Consumer<String, String> consumer = null;
        try {
            consumer = new KafkaConsumer<>(properties);
            consumer.subscribe(Collections.singletonList("test-topic"));
            while (!doneConsuming) {
                ConsumerRecords<String, String> records = consumer.poll(5000);
                for (ConsumerRecord<String, String> record : records) {
                    String message = String.format("Consumed: key = %s value = %s with offset = %d partition = %d",
                            record.key(), record.value(), record.offset(), record.partition());
                    System.out.println(message);
                }

            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (consumer != null) {
                consumer.close();
            }
        }
    };
}
 
Example 3
Source File: KafkaValidationUtil09.java    From datacollector with Apache License 2.0
@Override
public int getPartitionCount(
    String metadataBrokerList,
    String topic,
    Map<String, Object> kafkaClientConfigs,
    int messageSendMaxRetries,
    long retryBackoffMs
) throws StageException {
  int partitionCount = -1;
  Consumer<String, String> kafkaConsumer = null;
  try {
    kafkaConsumer = createTopicMetadataClient(metadataBrokerList, kafkaClientConfigs);
    List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);
    if(partitionInfoList != null) {
      partitionCount = partitionInfoList.size();
    }
  } catch (KafkaException e) {
    LOG.error(KafkaErrors.KAFKA_41.getMessage(), topic, e.toString(), e);
    throw new StageException(KafkaErrors.KAFKA_41, topic, e.toString());
  } finally {
    if (kafkaConsumer != null) {
      kafkaConsumer.close();
    }
  }
  return partitionCount;
}
 
Example 4
Source File: TrackingTokenConsumerRebalanceListenerIntegrationTest.java    From extension-kafka with Apache License 2.0
@Test
void testSeekUsingEmptyTokenConsumerStartsAtPositionZero() {
    String topic = "testSeekUsing_EmptyToken_ConsumerStartsAtPositionZero";
    int numberOfPartitions = kafkaBroker.getPartitionsPerTopic();
    int recordsPerPartitions = 1;
    publishRecordsOnPartitions(producerFactory.createProducer(), topic, recordsPerPartitions, numberOfPartitions);

    int expectedRecordCount = numberOfPartitions * recordsPerPartitions;
    AtomicInteger recordCounter = new AtomicInteger();
    KafkaTrackingToken testToken = KafkaTrackingToken.newInstance(emptyMap());

    Consumer<?, ?> testConsumer = consumerFactory.createConsumer(DEFAULT_GROUP_ID);
    testConsumer.subscribe(
            Collections.singletonList(topic),
            new TrackingTokenConsumerRebalanceListener<>(testConsumer, () -> testToken)
    );

    getRecords(testConsumer).forEach(record -> {
        assertEquals(0, record.offset());
        recordCounter.getAndIncrement();
    });
    assertEquals(expectedRecordCount, recordCounter.get());

    testConsumer.close();
}
 
Example 5
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public void close() {
  try (@SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock)) {
    if (!proceedClosing()) {
      return;
    }
    try {
      Consumer<K, V> delegate = this.delegate;
      if (delegate != null) {
        delegate.close();
      }
    } finally {
      this.delegate = null;
      closeMdsClient();
    }
  }
}
 
Example 6
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
@Deprecated
public void close(long timeout, TimeUnit unit) {
  try (@SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock)) {
    if (!proceedClosing()) {
      return;
    }
    try {
      Consumer<K, V> delegate = this.delegate;
      if (delegate != null) {
        delegate.close(timeout, unit);
      }
    } finally {
      this.delegate = null;
      closeMdsClient();
    }
  }
}
 
Example 7
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public void close(Duration timeout) {
  try (@SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock)) {
    if (!proceedClosing()) {
      return;
    }
    try {
      Consumer<K, V> delegate = this.delegate;
      if (delegate != null) {
        delegate.close(timeout);
      }
    } finally {
      this.delegate = null;
      closeMdsClient();
    }
  }
}
 
Example 8
Source File: TestUtils.java    From uReplicator with Apache License 2.0
public static List<ConsumerRecord<Byte[], Byte[]>> consumeMessage(String bootstrapServer,
    String topicName,
    int timeoutMs
) throws InterruptedException {

  long time = new Date().getTime();
  Consumer<Byte[], Byte[]> consumer = createConsumer(bootstrapServer);
  consumer.subscribe(Collections.singletonList(topicName));

  List<ConsumerRecord<Byte[], Byte[]>> result = new ArrayList<>();
  while ((new Date().getTime()) - time < timeoutMs) {
    ConsumerRecords<Byte[], Byte[]> records = consumer.poll(1000);
    Iterator<ConsumerRecord<Byte[], Byte[]>> iterator = records.iterator();
    while (iterator.hasNext()) {
      result.add(iterator.next());
    }
    Thread.sleep(300);
  }
  consumer.close();
  return result;
}
 
Example 9
Source File: TestConsumer.java    From xxhadoop with Apache License 2.0
public static void main(String[] args) {
    Properties properties = new Properties();
    // bin/kafka-topics.sh
    properties.put("zookeeper.connect", "node-01:2181,node-02:2181,node-03:2181");
    // kafka-console-producer.sh
    properties.put("metadata.broker.list", "node-02:9092,node-03:9092,node-04:9092");
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    // kafka-console-consumer.sh
    properties.put("bootstrap.servers", "node-02:9092,node-03:9092,node-04:9092");
    
    // must specify group.id
    properties.put("group.id", "test-group-new");
    properties.put("auto.offset.reset", "earliest");
    
    Consumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
    consumer.subscribe(Arrays.asList("order-r"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);   // ms
            for (ConsumerRecord<String, String> record : records) {
                LOGGER.info("offset = {}, key = {}, value = {}\n", record.offset(), record.key(), record.value());
            }
        }
    } catch (Exception e) {
        // exceptions are ignored here; the consumer is still closed in the finally block
    } finally {
        consumer.close();
    }
    
}
 
Example 10
Source File: TrackingTokenConsumerRebalanceListenerIntegrationTest.java    From extension-kafka with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
void testSeekUsingExistingTokenConsumerStartsAtSpecificPosition() {
    String topic = "testSeekUsing_ExistingToken_ConsumerStartsAtSpecificPosition";
    int recordsPerPartitions = 10;
    publishRecordsOnPartitions(
            producerFactory.createProducer(), topic, recordsPerPartitions, kafkaBroker.getPartitionsPerTopic()
    );

    Map<TopicPartition, Long> positions = new HashMap<>();
    positions.put(new TopicPartition(topic, 0), 5L);
    positions.put(new TopicPartition(topic, 1), 1L);
    positions.put(new TopicPartition(topic, 2), 9L);
    positions.put(new TopicPartition(topic, 3), 4L);
    positions.put(new TopicPartition(topic, 4), 0L);
    KafkaTrackingToken testToken = KafkaTrackingToken.newInstance(positions);
    // This number corresponds to the total number of records remaining beyond the token's start positions
    //  across the five partitions, given the published `recordsPerPartitions`
    int numberOfRecordsToConsume = 26;

    Consumer<?, ?> testConsumer = consumerFactory.createConsumer(DEFAULT_GROUP_ID);
    testConsumer.subscribe(
            Collections.singletonList(topic),
            new TrackingTokenConsumerRebalanceListener<>(testConsumer, () -> testToken)
    );

    Seq<ConsumerRecord<byte[], byte[]>> resultRecords =
            pollUntilAtLeastNumRecords((KafkaConsumer<byte[], byte[]>) testConsumer, numberOfRecordsToConsume);
    resultRecords.foreach(resultRecord -> {
        TopicPartition resultTopicPartition = new TopicPartition(resultRecord.topic(), resultRecord.partition());
        assertTrue(resultRecord.offset() > positions.get(resultTopicPartition));
        // This ugly stuff is needed since I have to deal with a scala.collection.Seq
        return null;
    });
    assertEquals(numberOfRecordsToConsume, resultRecords.count(COUNT_ALL));

    testConsumer.close();
}
 
Example 11
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_input_and_output_topics() {
  String inputTopic = testName.getMethodName() + "-input";
  String outputTopic = testName.getMethodName() + "-output";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic).to(outputTopic);
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  Consumer<String, String> consumer = createTracingConsumer(outputTopic);

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
  assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
  assertChildOf(spanOutput, spanInput);

  streams.close();
  streams.cleanUp();
  consumer.close();
}
 
Example 12
Source File: KafkaValidationUtil09.java    From datacollector with Apache License 2.0
@Override
public boolean validateTopicExistence(
  Stage.Context context,
  String groupName,
  String configName,
  List<HostAndPort> kafkaBrokers,
  String metadataBrokerList,
  String topic,
  Map<String, Object> kafkaClientConfigs,
  List<Stage.ConfigIssue> issues,
  boolean producer
) {
  boolean valid = true;
  if(topic == null || topic.isEmpty()) {
    issues.add(context.createConfigIssue(groupName, configName, KafkaErrors.KAFKA_05));
    valid = false;
  } else {
    List<PartitionInfo> partitionInfos;
    // Use a Consumer to check whether the topic exists.
    // Using a Producer would unintentionally create the topic if it does not exist.
    Consumer<String, String> kafkaConsumer = null;
    try {
        kafkaConsumer = createTopicMetadataClient(
            metadataBrokerList,
            kafkaClientConfigs
        );
        partitionInfos = kafkaConsumer.partitionsFor(topic);
      if (null == partitionInfos || partitionInfos.isEmpty()) {
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_03,
                topic,
                metadataBrokerList
            )
        );
        valid = false;
      }
    } catch (KafkaException e) {
      LOG.error(KafkaErrors.KAFKA_68.getMessage(), topic, metadataBrokerList, e.toString(), e);
      issues.add(context.createConfigIssue(groupName, configName, KafkaErrors.KAFKA_68, topic, metadataBrokerList, e.toString()));
      valid = false;
    } finally {
      if (kafkaConsumer != null) {
        kafkaConsumer.close();
      }
    }
  }
  return valid;
}
 
Example 13
Source File: MessageClient.java    From alcor with Apache License 2.0
public List<?> runConsumer(String topic, boolean keepRunning) {
    Logger logger = LoggerFactory.getLogger();

    if (this.messageConsumerFactory == null) {
        logger.log(Level.INFO, "No message consumer factory is specified");
        return null;
    }

    List recordsValue = new ArrayList();
    Consumer consumer = this.messageConsumerFactory.Create();
    consumer.subscribe(Collections.singletonList(topic));

    int noMessageFound = 0;
    while (keepRunning) {
        // 1000 milliseconds is how long the consumer will wait if no records are found at the broker.
        ConsumerRecords<Long, ?> consumerRecords = consumer.poll(1000);

        if (consumerRecords.count() == 0) {
            noMessageFound++;
            logger.log(Level.INFO, "No message found :" + noMessageFound);

            if (noMessageFound > IKafkaConfiguration.MAX_NO_MESSAGE_FOUND_COUNT)
                // If the no-message count has reached the threshold, exit the loop.
                break;
            else
                continue;
        }

        // Print each record.
        consumerRecords.forEach(record -> {
            logger.log(Level.INFO, "Record Key " + record.key());
            logger.log(Level.INFO, "Record value " + record.value());
            logger.log(Level.INFO, "Record partition " + record.partition());
            logger.log(Level.INFO, "Record offset " + record.offset());

            recordsValue.add(record.value());
        });
        // Commit the record offsets to the broker.
        consumer.commitAsync();
    }

    consumer.close();
    return recordsValue;
}
 
Example 14
Source File: KafkaMirrorMakerConnector.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
public void run() {
  Datastream datastream = _datastreamGroup.getDatastreams().get(0);
  String bootstrapValue = KafkaConnectionString.valueOf(datastream.getSource().getConnectionString())
      .getBrokers().stream()
      .map(KafkaBrokerAddress::toString)
      .collect(Collectors.joining(KafkaConnectionString.BROKER_LIST_DELIMITER));
  Consumer<?, ?> consumer = createConsumer(_consumerProperties, bootstrapValue,
      _groupIdConstructor.constructGroupId(datastream) + DEST_CONSUMER_GROUP_ID_SUFFIX);

  LOG.info("Fetch thread for {} started", _datastreamGroup.getName());
  while (!isInterrupted() && !_shutdown) {
    try {
      List<String> newPartitionInfo = getPartitionsInfo(consumer);
      LOG.debug("Fetch partition info for {}, oldPartitionInfo: {}, new Partition info: {}"
          , datastream.getName(), _subscribedPartitions, newPartitionInfo);

      if (!ListUtils.isEqualList(newPartitionInfo, _subscribedPartitions)) {
        LOG.info("get updated partition info for {}, oldPartitionInfo: {}, new Partition info: {}"
            , datastream.getName(), _subscribedPartitions, newPartitionInfo);

        _subscribedPartitions = Collections.synchronizedList(newPartitionInfo);
        _initialized = true;
        _partitionChangeCallback.accept(_datastreamGroup);
      }
      Thread.sleep(_partitionFetchIntervalMs);
    } catch (Throwable t) {
      // If the broker goes down, the consumer will receive an exception. However, there is no need to
      // re-create the consumer when the broker comes back; the Kafka consumer will reconnect automatically.
      LOG.warn("detect error for thread " + _datastreamGroup.getName() + ", ex: ", t);
      _dynamicMetricsManager.createOrUpdateMeter(MODULE, _datastreamGroup.getName(), NUM_PARTITION_FETCH_ERRORS, 1);
    }
  }

  if (consumer != null) {
    consumer.close();
  }

  consumer = null;
  LOG.info("PartitionDiscoveryThread for {} stopped", _datastreamGroup.getName());
}
 
Example 15
Source File: KafkaReceiver.java    From zerocode with Apache License 2.0
public String receive(String kafkaServers, String topicName, String requestJsonWithConfig) throws IOException {

        ConsumerLocalConfigs consumerLocalConfigs = readConsumerLocalTestProperties(requestJsonWithConfig);

        ConsumerLocalConfigs effectiveLocal = deriveEffectiveConfigs(consumerLocalConfigs, consumerCommonConfigs);

        LOGGER.info("\n### Kafka Consumer Effective configs:{}\n", effectiveLocal);

        Consumer consumer = createConsumer(kafkaServers, consumerPropertyFile, topicName);

        final List<ConsumerRecord> rawRecords = new ArrayList<>();
        final List<ConsumerJsonRecord> jsonRecords = new ArrayList<>();

        int noOfTimeOuts = 0;

        handleSeekOffset(effectiveLocal, consumer);

        while (true) {
            LOGGER.info("polling records  - noOfTimeOuts reached : " + noOfTimeOuts);

            final ConsumerRecords records = consumer.poll(ofMillis(getPollTime(effectiveLocal)));

            if (records.count() == 0) {
                noOfTimeOuts++;
                if (noOfTimeOuts > getMaxTimeOuts(effectiveLocal)) {
                    break;
                } else {
                    continue;
                }
            } else {
                LOGGER.info("Got {} records after {} timeouts\n", records.count(), noOfTimeOuts);
                // -----------------------------------
                // reset after it fetched some records
                // -----------------------------------
                noOfTimeOuts = 0;
            }

            if (records != null) {
                Iterator recordIterator = records.iterator();

                LOGGER.info("Consumer chosen recordType: " + effectiveLocal.getRecordType());

                switch (effectiveLocal.getRecordType()) {
                    case RAW:
                        readRaw(rawRecords, recordIterator);
                        break;

                    case JSON:
                        readJson(jsonRecords, recordIterator);
                        break;

                    default:
                        throw new RuntimeException("Unsupported record type - '" + effectiveLocal.getRecordType()
                                + "'. Supported values are 'JSON','RAW'");
                }

            }

            handleCommitSyncAsync(consumer, consumerCommonConfigs, effectiveLocal);
        }

        consumer.close();

        handleRecordsDump(effectiveLocal, rawRecords, jsonRecords);

        return prepareResult(effectiveLocal, jsonRecords, rawRecords);

    }
 
Example 16
Source File: KafkaBinderTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testPolledConsumerWithDlq() throws Exception {
	KafkaTestBinder binder = getBinder();
	PollableSource<MessageHandler> inboundBindTarget = new DefaultPollableMessageSource(
			this.messageConverter);
	ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
	properties.getExtension().setPollTimeout(1);
	properties.setMaxAttempts(2);
	properties.setBackOffInitialInterval(0);
	properties.getExtension().setEnableDlq(true);
	Map<String, Object> producerProps = KafkaTestUtils
			.producerProps(embeddedKafka.getEmbeddedKafka());
	Binding<PollableSource<MessageHandler>> binding = binder.bindPollableConsumer(
			"pollableDlq", "group-pcWithDlq", inboundBindTarget, properties);
	KafkaTemplate template = new KafkaTemplate(
			new DefaultKafkaProducerFactory<>(producerProps));
	template.send("pollableDlq", "testPollableDLQ");
	try {
		int n = 0;
		while (n++ < 100) {
			inboundBindTarget.poll(m -> {
				throw new RuntimeException("test DLQ");
			});
			Thread.sleep(100);
		}
	}
	catch (MessageHandlingException e) {
		assertThat(e.getCause().getMessage()).isEqualTo("test DLQ");
	}
	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dlq", "false",
			embeddedKafka.getEmbeddedKafka());
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	ConsumerFactory cf = new DefaultKafkaConsumerFactory<>(consumerProps);
	Consumer consumer = cf.createConsumer();
	embeddedKafka.getEmbeddedKafka().consumeFromAnEmbeddedTopic(consumer,
			"error.pollableDlq.group-pcWithDlq");
	ConsumerRecord deadLetter = KafkaTestUtils.getSingleRecord(consumer,
			"error.pollableDlq.group-pcWithDlq");
	assertThat(deadLetter).isNotNull();
	assertThat(deadLetter.value()).isEqualTo("testPollableDLQ");
	binding.unbind();
	consumer.close();
}
 
Example 17
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
/**
 * Close any existing delegate client and create a new one based on the
 * latest config overrides.
 * @param abortIfExists abort the operation if the current delegate is not null
 * @return true if the delegate client was actually replaced
 */
private boolean recreateDelegate(boolean abortIfExists) {
  try (@SuppressWarnings("unused") CloseableLock swLock = new CloseableLock(delegateLock.writeLock())) {
    Set<TopicPartition> pausedPartitions = null;
    Consumer<K, V> prevConsumer = delegate;
    if (prevConsumer != null) {
      if (abortIfExists) {
        return false; //leave existing delegate as-is
      }
      pausedPartitions = prevConsumer.paused();
      delegate = null;
      try {
        try {
          prevConsumer.commitSync(Duration.ofSeconds(30));
        } finally {
          prevConsumer.close(Duration.ofSeconds(10));
        }
      } catch (Exception e) {
        LOG.error("error closing old delegate consumer", e);
      }
    }

    if (closing) {
      return false;
    }

    delegate = consumerFactory.create(baseConfig, LiKafkaClientsUtils.convertConfigMapToProperties(configOverrides));

    if (subscriptionPattern != null) {
      if (rebalanceListener != null) {
        delegate.subscribe(subscriptionPattern, rebalanceListener);
      } else {
        delegate.subscribe(subscriptionPattern);
      }
    } else if (subscribedTopics != null) {
      if (rebalanceListener != null) {
        delegate.subscribe(subscribedTopics, rebalanceListener);
      } else {
        delegate.subscribe(subscribedTopics);
      }
    } else if (assignedPartitions != null) {
      delegate.assign(assignedPartitions);
    }
    if (pausedPartitions != null && !pausedPartitions.isEmpty()) {
      // TODO - this may throw an exception if the rebalance hasn't completed; test this
      delegate.pause(pausedPartitions);
    }
    return true;
  }
}
 
Example 18
Source File: TrackingTokenConsumerRebalanceListenerIntegrationTest.java    From extension-kafka with Apache License 2.0
@Test
void testSeekUsingExistingTokenConsumerStartsAtSpecificPositionAndCanContinueReadingNewRecords() {
    String topic = "testSeekUsing_ExistingToken_ConsumerStartsAtSpecificPosition_AndCanContinueReadingNewRecords";
    int recordsPerPartitions = 10;
    Producer<String, String> testProducer = producerFactory.createProducer();
    publishRecordsOnPartitions(
            testProducer, topic, recordsPerPartitions, kafkaBroker.getPartitionsPerTopic()
    );

    Map<TopicPartition, Long> positions = new HashMap<>();
    positions.put(new TopicPartition(topic, 0), 5L);
    positions.put(new TopicPartition(topic, 1), 1L);
    positions.put(new TopicPartition(topic, 2), 9L);
    positions.put(new TopicPartition(topic, 3), 4L);
    positions.put(new TopicPartition(topic, 4), 0L);
    KafkaTrackingToken testToken = KafkaTrackingToken.newInstance(positions);
    // This number corresponds to the total number of records remaining beyond the token's start positions
    //  across the five partitions, given the published `recordsPerPartitions`
    int numberOfRecordsToConsume = 26;

    Consumer<?, ?> testConsumer = consumerFactory.createConsumer(DEFAULT_GROUP_ID);
    testConsumer.subscribe(
            Collections.singletonList(topic),
            new TrackingTokenConsumerRebalanceListener<>(testConsumer, () -> testToken)
    );

    //noinspection unchecked
    Seq<ConsumerRecord<byte[], byte[]>> resultRecords =
            pollUntilAtLeastNumRecords((KafkaConsumer<byte[], byte[]>) testConsumer, numberOfRecordsToConsume);
    resultRecords.foreach(resultRecord -> {
        TopicPartition resultTopicPartition = new TopicPartition(resultRecord.topic(), resultRecord.partition());
        assertTrue(resultRecord.offset() > positions.get(resultTopicPartition));
        // This ugly stuff is needed since I have to deal with a scala.collection.Seq
        return null;
    });
    assertEquals(numberOfRecordsToConsume, resultRecords.count(COUNT_ALL));

    publishNewRecords(testProducer, topic);
    int secondNumberOfRecords = 4; // The `publishNewRecords(Producer, String)` produces 4 new records
    //noinspection unchecked
    resultRecords = pollUntilAtLeastNumRecords((KafkaConsumer<byte[], byte[]>) testConsumer, secondNumberOfRecords);

    resultRecords.foreach(resultRecord -> {
        assertEquals(10, resultRecord.offset());
        // This ugly stuff is needed since I have to deal with a scala.collection.Seq
        return null;
    });
    assertEquals(secondNumberOfRecords, resultRecords.count(COUNT_ALL));

    testConsumer.close();
}
 
Example 19
Source File: KafkaUtils.java    From remoting-kafka-plugin with MIT License
public static void unassignConsumer(Consumer<String, byte[]> consumer) {
    consumer.assign(new ArrayList<>());
    consumer.close();
}
 