Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#committed()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#committed(). Each example is taken from an open-source project; the source file and project are noted above each snippet.
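Before the project examples below, here is a minimal, self-contained sketch of the API itself (the broker address, group id, and topic name are placeholder values). committed() returns the last offset the consumer group committed for a partition, or null when nothing has been committed yet. In newer Kafka clients (2.4+) the single-partition overload is deprecated in favor of a Set-based overload that looks up several partitions in one call; both are shown.

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class CommittedOffsetSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
		props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

		TopicPartition tp = new TopicPartition("example-topic", 0);           // placeholder topic

		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
			consumer.assign(Collections.singleton(tp));

			// Single-partition lookup, as used in the examples below.
			// Returns null if the group has never committed an offset for this partition.
			OffsetAndMetadata committed = consumer.committed(tp);
			System.out.println("committed = " + (committed == null ? "none" : committed.offset()));

			// Kafka clients 2.4+: look up several partitions in one round trip.
			// Partitions without a committed offset map to null values.
			Map<TopicPartition, OffsetAndMetadata> committedMap =
					consumer.committed(Collections.singleton(tp), Duration.ofSeconds(5));
			committedMap.forEach((p, om) ->
					System.out.println(p + " -> " + (om == null ? "none" : om.offset())));
		}
	}
}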
Example 1
Source File: KafkaDispatcherImpl.java    From arcusplatform with Apache License 2.0
private void seekAndAssign(Collection<TopicPartition> partitions, KafkaConsumer<PlatformPartition, byte[]> consumer) {
	consumer.assign(partitions);
	if(config.isTransientOffsets()) {
		logger.info("Transient offsets enabled, seeking to latest");
		consumer.seekToEnd(partitions);
	}
	else {
		// Partitions with a committed offset for this group will resume from it after assign();
		// collect those that have no committed offset yet.
		Set<TopicPartition> unknownPartitions = new HashSet<>();
		for(TopicPartition tp: partitions) {
			OffsetAndMetadata om = consumer.committed(tp);
			if(om == null) {
				unknownPartitions.add(tp);
			}
		}
		// Where these unknown partitions are seeked to is decided elsewhere in the original source (not shown in this excerpt).
	}
}
 
Example 2
Source File: NewApiTopicConsumer.java    From jeesuite-libs with Apache License 2.0
/**
 * Reset offsets to the last recorded (processed) positions
 */
private void resetCorrectOffsets(ConsumerWorker worker) {	
	
	KafkaConsumer<String, Serializable> consumer = worker.consumer;
	Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
	Set<String> topics = topicInfos.keySet();
	
	List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());
	
	List<PartitionInfo> partitions = null;
	
	consumer.poll(200);
	
	for (String topic : topics) {
		if(!expectTopics.contains(topic))continue;
		
		partitions = topicInfos.get(topic);
		for (PartitionInfo partition : partitions) {
			try {
				// the expected offset: the last offset this consumer recorded as processed
				long expectOffsets = consumerContext.getLatestProcessedOffsets(topic, partition.partition());
				TopicPartition topicPartition = new TopicPartition(partition.topic(), partition.partition());
				// committed() returns null when the group has never committed an offset for this partition
				OffsetAndMetadata metadata = consumer.committed(topicPartition);
				
				Set<TopicPartition> assignment = consumer.assignment();
				if(metadata != null && assignment.contains(topicPartition)){
					if(expectOffsets > 0 && expectOffsets < metadata.offset()){
						consumer.seek(topicPartition, expectOffsets);
						//consumer.seekToBeginning(assignment);
						logger.info(">>>>>>> seek Topic[{}] partition[{}] from {} to {}", topic, partition.partition(), metadata.offset(), expectOffsets);
					}
				}
			} catch (Exception e) {
				logger.warn("failed to seek topic[" + topic + "] partition[" + partition.partition() + "] offsets", e);
			}
		}
	}
	consumer.resume(consumer.assignment());
}
 
Example 3
Source File: Kafka0_10ConsumerLoader.java    From datacollector with Apache License 2.0
private boolean firstConnection(String topic, KafkaConsumer kafkaAuxiliaryConsumer) throws StageException {
  LOG.debug("Checking first connection for Topic {}", topic);
  if (topic != null && !topic.isEmpty()) {
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);
    for (PartitionInfo partitionInfo : partitionInfoList) {
      if (partitionInfo != null) {
        TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
        try {
          OffsetAndMetadata offsetAndMetadata = kafkaAuxiliaryConsumer.committed(topicPartition);
          if (offsetAndMetadata != null) {
            // Already defined offset for that partition
            LOG.debug("Offset defined for Topic {} , partition {}", topic, topicPartition.partition());
            kafkaAuxiliaryConsumer.close();
            return false;
          }
        } catch (Exception ex) {
          // Could not obtain committed offset for corresponding partition
          LOG.error(KafkaErrors.KAFKA_30.getMessage(), ex.toString(), ex);
          throw new StageException(KafkaErrors.KAFKA_30, ex.toString(), ex);
        }

      }
    }
  }

  // There was no offset already defined for any partition so it is the first connection
  return true;
}
 
Example 4
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testGiganticLargeMessages() throws Exception {
  MessageSplitter splitter = new MessageSplitterImpl(MAX_SEGMENT_SIZE,
      new DefaultSegmentSerializer(),
      new UUIDFactory.DefaultUUIDFactory<>());

  String topic = "testGiganticLargeMessages";
  createTopic(topic);
  TopicPartition tp = new TopicPartition(topic, 0);
  Collection<TopicPartition> tps = new ArrayList<>(Collections.singletonList(tp));

  //send 2 interleaved gigantic msgs

  Producer<byte[], byte[]> producer = createRawProducer();
  // M0, 20 segments
  UUID messageId0 = LiKafkaClientsUtils.randomUUID();
  String message0 = KafkaTestUtils.getRandomString(20 * MAX_SEGMENT_SIZE);
  List<ProducerRecord<byte[], byte[]>> m0Segs = splitter.split(topic, 0, messageId0, message0.getBytes());
  // M1, 30 segments
  UUID messageId1 = LiKafkaClientsUtils.randomUUID();
  String message1 = KafkaTestUtils.getRandomString(30 * MAX_SEGMENT_SIZE);
  List<ProducerRecord<byte[], byte[]>> m1Segs = splitter.split(topic, 0, messageId1, message1.getBytes());

  List<ProducerRecord<byte[], byte[]>> interleaved = interleave(m0Segs, m1Segs);
  for (ProducerRecord<byte[], byte[]> rec : interleaved) {
    producer.send(rec).get();
  }

  //create a consumer with not enough memory to assemble either

  Properties props = new Properties();
  String groupId = "testGiganticLargeMessages-" + UUID.randomUUID();
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  // Make sure we start to consume from the beginning.
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Only fetch one record at a time.
  props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
  // No auto commit
  props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
  // Not enough memory to assemble anything
  props.setProperty(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_BUFFER_CAPACITY_CONFIG, "" + (MAX_SEGMENT_SIZE + 1));
  props.setProperty(LiKafkaConsumerConfig.EXCEPTION_ON_MESSAGE_DROPPED_CONFIG, "false");

  LiKafkaConsumer<String, String> tempConsumer = createConsumer(props);
  tempConsumer.assign(tps);

  //traverse entire partition

  int topicSize = interleaved.size();
  long timeout = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(120);
  int msgsDelivered = 0;
  while (true) {
    ConsumerRecords<String, String> records = tempConsumer.poll(1000);
    msgsDelivered += records.count();
    long position = tempConsumer.position(tp);
    if (position >= topicSize) {
      break;
    }
    if (System.currentTimeMillis() > timeout) {
      throw new IllegalStateException("unable to consume to the end of the topic within timeout."
          + " position=" + position + ". end=" + topicSize);
    }
  }

  Assert.assertTrue(msgsDelivered == 0, "no msgs were expected to be delivered. instead got " + msgsDelivered);

  //make sure offsets committed reflect the msgs we've given up on

  tempConsumer.commitSync();
  OffsetAndMetadata committed = tempConsumer.committed(tp);
  Assert.assertEquals(committed.offset(), topicSize); //li consumer would claim to be at end

  Properties vanillaProps = getConsumerProperties(props);
  KafkaConsumer<String, String> vanillaConsumer = new KafkaConsumer<>(vanillaProps);
  vanillaConsumer.assign(tps);
  OffsetAndMetadata vanillaCommitted = vanillaConsumer.committed(tp);
  Assert.assertEquals(vanillaCommitted.offset(), topicSize - 1); //vanilla offset is one before (1 fragment in buffer)
}