org.apache.kafka.clients.consumer.OffsetAndMetadata Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.OffsetAndMetadata. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
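Before the examples, a quick orientation: an OffsetAndMetadata pairs the offset to commit with an optional metadata string. The minimal, self-contained sketch below (class name and values are illustrative, not taken from any of the projects quoted here) shows the two most commonly used constructors and accessors, plus the offset + 1 convention that recurs throughout the examples: the committed offset is the next offset to read, not the last offset processed.

import org.apache.kafka.clients.consumer.OffsetAndMetadata;

public class OffsetAndMetadataSketch {
    public static void main(String[] args) {
        long lastProcessedOffset = 42L;

        // Commit lastProcessedOffset + 1: the committed offset is the next
        // offset the consumer group should read, not the last one processed.
        OffsetAndMetadata plain = new OffsetAndMetadata(lastProcessedOffset + 1);
        OffsetAndMetadata withMetadata =
                new OffsetAndMetadata(lastProcessedOffset + 1, "committed by worker-1");

        System.out.println(plain.offset());          // 43
        System.out.println(withMetadata.metadata()); // committed by worker-1
    }
}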
Example #1
Source File: OffsetCommitSyncPartition.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    // process the record here
                }
                long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(lastConsumedOffset + 1)));
            }
        }
    } finally {
        consumer.close();
    }
}
 
Example #2
Source File: KafkaPipeLine.java    From bireme with Apache License 2.0
@Override
public void commit() {
  HashMap<TopicPartition, OffsetAndMetadata> offsets =
      new HashMap<TopicPartition, OffsetAndMetadata>();

  partitionOffset.forEach((key, value) -> {
    String[] parts = key.split("\\+");
    String topic = parts[0];
    int partition = Integer.parseInt(parts[1]);
    offsets.put(new TopicPartition(topic, partition), new OffsetAndMetadata(value + 1));
  });

  consumer.commitSync(offsets);
  committed.set(true);
  partitionOffset.clear();

  // record the time taken by the commit
  timerCTX.stop();

  stat.newestCompleted = newestRecord;
  stat.delay = new Date().getTime() - start.getTime();
}
 
Example #3
Source File: ProcessingPartition.java    From common-kafka with Apache License 2.0
/**
 * Returns the reset offset used in situations where the consumer has no committed offset for a partition, or its committed
 * offset is out of range. The returned offset is also committed, if {@link ProcessingConfig#getCommitInitialOffset()
 * allowed} by the configuration.
 *
 * @return the reset offset
 */
private long getCommittedResetOffset() {
    // Get the reset offset
    long resetOffset = getResetOffset();

    LOGGER.debug("Using reset offset [{}] for partition [{}] as last committed offset", resetOffset, topicPartition);

    // Consumer doesn't have an offset so try to commit the offset. This can be helpful for monitoring in case
    // there are no messages in the queue or processing is failing
    if (config.getCommitInitialOffset()) {
        try {
            consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(resetOffset)));
        } catch (KafkaException e) {
            LOGGER.warn("Unable to commit reset offset {} during initialization of partition {} for group {}", resetOffset,
                    topicPartition, config.getGroupId(), e);
        }
    }

    return resetOffset;
}
 
Example #4
Source File: OffsetMapQueueSinkTest.java    From beast with Apache License 2.0
@Test
public void shouldPushMultipleMessagesToQueue() throws InterruptedException {
    BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> queue = new LinkedBlockingQueue<>();
    queueSink = new OffsetMapQueueSink(queue, queueConfig);
    Records messages = new Records(Arrays.asList(new Record(offsetInfo, new HashMap<>()), new Record(offsetInfo, new HashMap<>())));

    Status status = queueSink.push(messages);

    assertTrue(status.isSuccess());
    assertEquals(1, queue.size());
    Map<TopicPartition, OffsetAndMetadata> partitionsCommitOffset = queue.take();
    assertEquals(1, partitionsCommitOffset.size());
    Map.Entry<TopicPartition, OffsetAndMetadata> offset = partitionsCommitOffset.entrySet().iterator().next();
    assertEquals("default-topic", offset.getKey().topic());
    assertEquals(0, offset.getKey().partition());
    assertEquals(1, offset.getValue().offset());
}
 
Example #5
Source File: ConsumerTest.java    From kbear with Apache License 2.0
protected void commitSync(java.util.function.Consumer<Consumer<String, String>> committer)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);
        committer.accept(consumer);
        OffsetAndMetadata committed2 = consumer.committed(_topicPartition);
        System.out.println("committed2: " + committed2);
        Assert.assertTrue(committed2.offset() > committed.offset());
    }
}
 
Example #6
Source File: KafkaConsumerThread.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tells this thread to commit a set of offsets. This method does not block; the committing
 * operation happens asynchronously.
 *
 * <p>Only one commit operation may be pending at any time. If the committing takes longer than
 * the frequency with which this method is called, then some commits may be skipped due to being
 * superseded by newer ones.
 *
 * @param offsetsToCommit The offsets to commit
 * @param commitCallback callback when Kafka commit completes
 */
void setOffsetsToCommit(
		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
		@Nonnull KafkaCommitCallback commitCallback) {

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	if (nextOffsetsToCommit.getAndSet(Tuple2.of(offsetsToCommit, commitCallback)) != null) {
		log.warn("Committing offsets to Kafka takes longer than the checkpoint interval. " +
				"Skipping commit of previous offsets because newer complete checkpoint offsets are available. " +
				"This does not compromise Flink's checkpoint integrity.");
	}

	// if the consumer is blocked in a poll() or handover operation, wake it up to commit soon
	handover.wakeupProducer();

	synchronized (consumerReassignmentLock) {
		if (consumer != null) {
			consumer.wakeup();
		} else {
			// the consumer is currently isolated for partition reassignment;
			// set this flag so that the wakeup state is restored once the reassignment is complete
			hasBufferedWakeup = true;
		}
	}
}
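The snippet above references a nextOffsetsToCommit field that is not shown. The following is a rough sketch of the superseding-commit pattern its Javadoc describes, simplified to hold only the offsets (the real code pairs them with the callback in a Tuple2); the class and method names here are illustrative, not Flink's actual implementation.

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class SupersedingCommitSketch {

    // Holds at most one pending commit request; a newer request replaces an
    // older one that the consumer thread has not picked up yet.
    private final AtomicReference<Map<TopicPartition, OffsetAndMetadata>> nextOffsetsToCommit =
            new AtomicReference<>();

    // Called from the checkpoint thread; never blocks.
    void setOffsetsToCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        if (nextOffsetsToCommit.getAndSet(offsets) != null) {
            // the previous request was superseded before it could be committed
        }
    }

    // Called from the consumer thread inside its poll loop.
    void maybeCommit(KafkaConsumer<?, ?> consumer) {
        Map<TopicPartition, OffsetAndMetadata> toCommit = nextOffsetsToCommit.getAndSet(null);
        if (toCommit != null) {
            consumer.commitAsync(toCommit, null);
        }
    }
}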
 
Example #7
Source File: KafkaBinderMetricsTest.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
public void shouldSumUpPartitionsLags() {
	Map<TopicPartition, Long> endOffsets = new HashMap<>();
	endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
	endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
	org.mockito.BDDMockito
			.given(consumer.endOffsets(ArgumentMatchers.anyCollection()))
			.willReturn(endOffsets);
	org.mockito.BDDMockito
			.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class)))
			.willReturn(new OffsetAndMetadata(500));
	List<PartitionInfo> partitions = partitions(new Node(0, null, 0),
			new Node(0, null, 0));
	topicsInUse.put(TEST_TOPIC,
			new TopicInformation("group2-metrics", partitions, false));
	org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC))
			.willReturn(partitions);
	metrics.bindTo(meterRegistry);
	assertThat(meterRegistry.getMeters()).hasSize(1);
	assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME)
			.tag("group", "group2-metrics").tag("topic", TEST_TOPIC).gauge().value())
					.isEqualTo(1000.0);
}
 
Example #8
Source File: ConsumerLease.java    From localization_nifi with Apache License 2.0
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                    .mapToLong(record -> record.offset())
                    .max()
                    .getAsLong();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));

            // write records to content repository and session
            if (demarcatorBytes == null) {
                totalFlowFiles += messages.size();
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            } else {
                writeData(getProcessSession(), messages, partition);
            }
        }
    });
}
 
Example #9
Source File: LiKafkaOffsetCommitCallback.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsetAndMetadataMap, Exception e) {
  if (_userCallback != null) {
    Map<TopicPartition, OffsetAndMetadata> userOffsetMap = topicPartitionOffsetAndMetadataMap;
    if (topicPartitionOffsetAndMetadataMap != null) {
      userOffsetMap = new HashMap<>();
      for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : topicPartitionOffsetAndMetadataMap.entrySet()) {
        String rawMetadata = entry.getValue().metadata();
        long userOffset = LiKafkaClientsUtils.offsetFromWrappedMetadata(rawMetadata);
        String userMetadata = LiKafkaClientsUtils.metadataFromWrappedMetadata(rawMetadata);
        userOffsetMap.put(entry.getKey(), new OffsetAndMetadata(userOffset, userMetadata));
      }
    }
    _userCallback.onComplete(userOffsetMap, e);
  }
}
 
Example #10
Source File: KafkaConsumerThread.java    From flink with Apache License 2.0
/**
 * Tells this thread to commit a set of offsets. This method does not block; the committing
 * operation happens asynchronously.
 *
 * <p>Only one commit operation may be pending at any time. If the committing takes longer than
 * the frequency with which this method is called, then some commits may be skipped due to being
 * superseded by newer ones.
 *
 * @param offsetsToCommit The offsets to commit
 * @param commitCallback callback when Kafka commit completes
 */
void setOffsetsToCommit(
		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
		@Nonnull KafkaCommitCallback commitCallback) {

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	if (nextOffsetsToCommit.getAndSet(Tuple2.of(offsetsToCommit, commitCallback)) != null) {
		log.warn("Committing offsets to Kafka takes longer than the checkpoint interval. " +
				"Skipping commit of previous offsets because newer complete checkpoint offsets are available. " +
				"This does not compromise Flink's checkpoint integrity.");
	}

	// if the consumer is blocked in a poll() or handover operation, wake it up to commit soon
	handover.wakeupProducer();

	synchronized (consumerReassignmentLock) {
		if (consumer != null) {
			consumer.wakeup();
		} else {
			// the consumer is currently isolated for partition reassignment;
			// set this flag so that the wakeup state is restored once the reassignment is complete
			hasBufferedWakeup = true;
		}
	}
}
 
Example #11
Source File: Records.java    From beast with Apache License 2.0
public Map<TopicPartition, OffsetAndMetadata> getPartitionsCommitOffset() {
    // kafka commit requires offset + 1 (next offset)
    if (!partitionsCommitOffset.isEmpty()) {
        return partitionsCommitOffset;
    }
    records.forEach(r -> {
        OffsetInfo offsetInfo = r.getOffsetInfo();
        TopicPartition key = offsetInfo.getTopicPartition();
        OffsetMetadata value = new OffsetMetadata(offsetInfo.getOffset() + 1);
        OffsetMetadata previousOffset = (OffsetMetadata) partitionsCommitOffset.getOrDefault(key, new OffsetMetadata(Integer.MIN_VALUE));
        if (previousOffset.compareTo(value) < 0) {
            partitionsCommitOffset.put(key, value);
        }
    });
    return partitionsCommitOffset;
}
 
Example #12
Source File: ProcessingPartitionTest.java    From common-kafka with Apache License 2.0
@Before
public void before() {
    properties = new Properties();
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, OffsetResetStrategy.EARLIEST.toString().toLowerCase());

    config = new ProcessingConfig(properties);
    topicPartition = new TopicPartition("topic", 1);

    when(consumer.committed(topicPartition)).thenReturn(new OffsetAndMetadata(0L));

    partition = new MockProcessingPartition<>(topicPartition, config, consumer);

    logAppender = new TestLogAppender();
    RootLogger.getRootLogger().addAppender(logAppender);
}
 
Example #13
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private void seedBadOffset(String topic, Properties openSourceConsumerProperties, long offset, long diffFromEnd) {
  try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(openSourceConsumerProperties)) {
    List<PartitionInfo> partitionsForTopic = kafkaConsumer.partitionsFor(topic);
    List<TopicPartition> tps = new ArrayList<>();
    for (int i = 0; i < partitionsForTopic.size(); i++) {
      tps.add(new TopicPartition(topic, i));
    }
    Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(tps);
    Map<TopicPartition, OffsetAndMetadata> badOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> endOffsetEntry : endOffsets.entrySet()) {
      if (endOffsetEntry.getValue() == null || endOffsetEntry.getValue() == -1) {
        continue;
      }
      long badOffset = endOffsetEntry.getValue() + diffFromEnd;
      OffsetAndMetadata om = new OffsetAndMetadata(offset, badOffset + ",");
      badOffsetMap.put(endOffsetEntry.getKey(), om);
    }
    kafkaConsumer.commitSync(badOffsetMap);
  }
}
 
Example #14
Source File: ConsumerOffsetClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getCommittedOffsets() {
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("topic1", 0), 123L);
    offsets.put(new TopicPartition("topic1", 1), 234L);
    offsets.put(new TopicPartition("topic2", 0), -1L);
    offsets.put(new TopicPartition("topic2", 1), -1L);

    when(consumer.partitionsFor("topic1")).thenReturn(Arrays.asList(
            new PartitionInfo("topic1", 0, null, null, null),
            new PartitionInfo("topic1", 1, null, null, null)));
    when(consumer.partitionsFor("topic2")).thenReturn(Arrays.asList(
            new PartitionInfo("topic2", 0, null, null, null),
            new PartitionInfo("topic2", 1, null, null, null)));

    when(consumer.committed(new TopicPartition("topic1", 0))).thenReturn(new OffsetAndMetadata(123L));
    when(consumer.committed(new TopicPartition("topic1", 1))).thenReturn(new OffsetAndMetadata(234L));

    assertThat(client.getCommittedOffsets(Arrays.asList("topic1", "topic2")), is(offsets));
}
 
Example #15
Source File: BackupSinkTask.java    From kafka-backup with Apache License 2.0
@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    try {
        for (PartitionWriter partitionWriter : partitionWriters.values()) {
            partitionWriter.flush();
            log.debug("Flushed Topic {}, Partition {}"
                    , partitionWriter.topic(), partitionWriter.partition());
        }
        offsetSink.flush();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Example #16
Source File: OffsetCommitWorkerTest.java    From beast with Apache License 2.0
@Before
public void setUp() {
    pollTimeout = 200;
    offsetBatchDuration = 1000;
    ackTimeoutTime = 2000;
    queueConfig = new QueueConfig(pollTimeout);
    commitPartitionsOffset = new HashMap<TopicPartition, OffsetAndMetadata>() {{
        put(new TopicPartition("topic", 0), new OffsetAndMetadata(1));
    }};
    CopyOnWriteArraySet<Map<TopicPartition, OffsetAndMetadata>> ackSet = new CopyOnWriteArraySet<>();
    acknowledgements = Collections.synchronizedSet(ackSet);
    offsetState = new OffsetState(acknowledgements, ackTimeoutTime, offsetBatchDuration);
    workerState = new WorkerState();
    offsetAcknowledger = new OffsetAcknowledger(acknowledgements);
}
 
Example #17
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Gets consumer group offsets from Kafka 0.10.x, 1.x, and 2.x clusters.
 */
public String getKafkaOffset(String clusterAlias) {
	Properties prop = new Properties();
	prop.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, parseBrokerServer(clusterAlias));

	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(prop, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(prop, clusterAlias);
	}
	JSONArray targets = new JSONArray();
	AdminClient adminClient = null;
	try {
		adminClient = AdminClient.create(prop);
		ListConsumerGroupsResult consumerGroups = adminClient.listConsumerGroups();
		java.util.Iterator<ConsumerGroupListing> groups = consumerGroups.all().get().iterator();
		while (groups.hasNext()) {
			String groupId = groups.next().groupId();
			if (!groupId.contains("kafka.eagle")) {
				ListConsumerGroupOffsetsResult offsets = adminClient.listConsumerGroupOffsets(groupId);
				for (Entry<TopicPartition, OffsetAndMetadata> entry : offsets.partitionsToOffsetAndMetadata().get().entrySet()) {
					JSONObject object = new JSONObject();
					object.put("group", groupId);
					object.put("topic", entry.getKey().topic());
					object.put("partition", entry.getKey().partition());
					object.put("offset", entry.getValue().offset());
					object.put("timestamp", CalendarUtils.getDate());
					targets.add(object);
				}
			}
		}
	} catch (Exception e) {
		LOG.error("Getting consumer offsets failed", e);
	} finally {
		if (adminClient != null) {
			adminClient.close();
		}
	}
	return targets.toJSONString();
}
 
Example #18
Source File: ClientKafkaMonitor.java    From Kafdrop with Apache License 2.0
private ConsumerPartitionVO createConsumerPartition(String groupId,
                                                    TopicPartition topicPartition,
                                                    OffsetAndMetadata offset)
{
   ConsumerPartitionVO vo = new ConsumerPartitionVO(groupId, topicPartition.topic(), topicPartition.partition());
   vo.setConsumerOffset(new ConsumerOffsetVO(-1, offset.offset()));
   return vo;
}
 
Example #19
Source File: KafkaSpout.java    From storm_spring_boot_demo with MIT License
private void initialize(Collection<TopicPartition> partitions) {
    if (!consumerAutoCommitMode) {
        acked.keySet().retainAll(partitions);   // remove from acked all partitions that are no longer assigned to this spout
    }

    retryService.retainAll(partitions);

    /*
     * Emitted messages for partitions that are no longer assigned to this spout can't
     * be acked and should not be retried, hence remove them from emitted collection.
     */
    Set<TopicPartition> partitionsSet = new HashSet<>(partitions);
    Iterator<KafkaSpoutMessageId> msgIdIterator = emitted.iterator();
    while (msgIdIterator.hasNext()) {
        KafkaSpoutMessageId msgId = msgIdIterator.next();
        if (!partitionsSet.contains(msgId.getTopicPartition())) {
            msgIdIterator.remove();
        }
    }

    for (TopicPartition tp : partitions) {
        final OffsetAndMetadata committedOffset = kafkaConsumer.committed(tp);
        final long fetchOffset = doSeek(tp, committedOffset);
        setAcked(tp, fetchOffset);
    }
    initialized = true;
    LOG.info("Initialization complete");
}
 
Example #20
Source File: ConsumerProxy.java    From kbear with Apache License 2.0
@Override
public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
    ObjectExtension.requireNonNull(offsets, "offsets");

    runWithoutConcurrency(() -> {
        Map<String, Map<TopicPartition, OffsetAndMetadata>> byTopic = new HashMap<>();
        offsets.forEach((tp, oam) -> byTopic.computeIfAbsent(tp.topic(), k -> new HashMap<>()).put(tp, oam));
        byTopic.forEach((t, os) -> _consumerHolders.get(t).getConsumer().commitAsync(os, callback));
    });
}
 
Example #21
Source File: ConsumerProxy.java    From kbear with Apache License 2.0
@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, Duration timeout) {
    ObjectExtension.requireNonNull(timeout, "timeout");
    if (timeout.toMillis() < 0)
        throw new IllegalArgumentException("timeout must not be negative");

    Map<String, Map<TopicPartition, OffsetAndMetadata>> map = toMap(offsets);
    runWithoutConcurrency(
            () -> forEach(map::containsKey, (t, c) -> c.getConsumer().commitSync(map.get(t), timeout)));
}
 
Example #22
Source File: ProcessingKafkaConsumerTest.java    From common-kafka with Apache License 2.0
@Test
public void commitOffsets() {
    long previousCommitCount = ProcessingKafkaConsumer.COMMIT_METER.count();

    // Read a bunch of messages
    processingConsumer.nextRecord(POLL_TIME); // record 1
    processingConsumer.nextRecord(POLL_TIME); // null
    processingConsumer.nextRecord(POLL_TIME); // record 2
    processingConsumer.nextRecord(POLL_TIME); // record 3
    processingConsumer.nextRecord(POLL_TIME); // null
    processingConsumer.nextRecord(POLL_TIME); // record 4
    processingConsumer.nextRecord(POLL_TIME); // record 5
    processingConsumer.nextRecord(POLL_TIME); // record 6

    // Ack some of the messages. We should now have some acked and some pending
    processingConsumer.ack(topicPartition, record1.offset());
    processingConsumer.ack(topicPartition, record3.offset());
    processingConsumer.ack(new TopicPartition(record5.topic(), record5.partition()), record5.offset());

    processingConsumer.commitOffsets();

    assertThat(ProcessingKafkaConsumer.COMMIT_METER.count(), is(previousCommitCount + 1));

    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();

    // Although record 3 is completed, record 2 is still pending, so record 1 is the highest offset we can commit for this partition
    committedOffsets.put(topicPartition, new OffsetAndMetadata(record1.offset() + 1));

    committedOffsets.put(new TopicPartition(record5.topic(), record5.partition()),
            new OffsetAndMetadata(record5.offset() + 1));

    verify(consumer).commitSync(committedOffsets);
    assertThat(processingConsumer.getCommittableOffsets().isEmpty(), is(true));
}
 
Example #23
Source File: OffsetCommitCallbackImpl.java    From kafka-workers with Apache License 2.0
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
    if (exception != null) {
        if (exception instanceof RetriableCommitFailedException) {
            final int failureNum = failuresInRow.incrementAndGet();
            if (failureNum <= maxFailuresInRow) {
                logger.warn("retriable commit failed exception: {}, offsets: {}, failureNum: {}/{}",
                        exception, offsets, failureNum, maxFailuresInRow);
            } else {
                logger.error("retriable commit failed exception: {}, offsets: {}, failureNum: {}/{}",
                        exception, offsets, failureNum, maxFailuresInRow);
                consumerThread.shutdown(new FailedCommitException(exception));
            }
        } else {
            logger.error("commit failed exception: {}, offsets: {}", exception, offsets);
            consumerThread.shutdown(new FailedCommitException(exception));
        }
    } else {
        logger.debug("commit succeeded, offsets: {}", offsets);
        failuresInRow.set(0);
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            TopicPartition partition = entry.getKey();
            long offset = entry.getValue().offset();
            metrics.recordSensor(WorkersMetrics.COMMITTED_OFFSET_METRIC, partition, offset);
        }
        offsetsState.removeCommitted(offsets);
    }
}
 
Example #24
Source File: KafkaConsumerThread.java    From Flink-CEPplus with Apache License 2.0
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception ex) {
	commitInProgress = false;

	if (ex != null) {
		log.warn("Committing offsets to Kafka failed. This does not compromise Flink's checkpoints.", ex);
		internalCommitCallback.onException(ex);
	} else {
		internalCommitCallback.onSuccess();
	}
}
 
Example #25
Source File: Kafka09Fetcher.java    From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
		Map<KafkaTopicPartition, Long> offsets,
		@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example #26
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private OffsetAndMetadata commitAndRetrieveOffsets(
    LiKafkaConsumer<String, String> consumer,
    TopicPartition tp, Map<TopicPartition,
    OffsetAndMetadata> offsetMap) throws Exception {
  final AtomicBoolean callbackFired = new AtomicBoolean(false);
  final AtomicReference<Exception> offsetCommitIssue = new AtomicReference<>(null);
  OffsetAndMetadata committed = null;
  long now = System.currentTimeMillis();
  long deadline = now + TimeUnit.MINUTES.toMillis(1);
  while (System.currentTimeMillis() < deadline) {
    //call commitAsync, wait for a NON-NULL return value (see https://issues.apache.org/jira/browse/KAFKA-6183)
    OffsetCommitCallback commitCallback = new OffsetCommitCallback() {
      @Override
      public void onComplete(Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsetAndMetadataMap, Exception e) {
        if (e != null) {
          offsetCommitIssue.set(e);
        }
        callbackFired.set(true);
      }
    };
    if (offsetMap != null) {
      consumer.commitAsync(offsetMap, commitCallback);
    } else {
      consumer.commitAsync(commitCallback);
    }
    while (!callbackFired.get()) {
      consumer.poll(20);
    }
    Assert.assertNull(offsetCommitIssue.get(), "offset commit failed");
    committed = consumer.committed(tp);
    if (committed != null) {
      break;
    }
    Thread.sleep(100);
  }
  assertNotNull(committed, "unable to retrieve committed offsets within timeout");
  return committed;
}
 
Example #27
Source File: SysCommonService.java    From xmfcn-spring-cloud with Apache License 2.0
/**
 * Processes the records for each partition.
 *
 * @param kafkaConsumer
 * @param topic
 * @param kafkaReader
 * @param partition
 * @param partitionRecords
 */
private void getPartitionRecords(KafkaConsumer<String, String> kafkaConsumer, String topic, IKafkaReader kafkaReader, TopicPartition partition, List<ConsumerRecord<String, String>> partitionRecords) {
    for (ConsumerRecord<String, String> record : partitionRecords) {
        String value = record.value(); // the record data
        if (StringUtil.isBlank(value)) {
            continue;
        }
        String key = record.key();
        long offset = record.offset();
        JSONObject json = new JSONObject();
        json.put("key", key);
        json.put("value", value);
        json.put("offset", offset);
        json.put("topic", topic);
        String classMethod = this.getClass().getName() + ".getPartitionRecords()";
        ThreadPoolUtil.getThreadPoolIsNext(cachedThreadPool, classMethod); // check the ratio of active threads to the pool maximum; if above 80%, pause for N seconds
        cachedThreadPool.execute(() -> {
            try {
                RetData aReturn = kafkaReader.execute(json);
                isRetryKafka(topic, json, aReturn);
            } catch (Exception e) {
                logger.error("处理kafka数据异常:" + StringUtil.getExceptionMsg(e) + "===>原始数据:" + json);
                e.printStackTrace();
            }
        });
        // commit each offset asynchronously as records are consumed, so an exception cannot block the commit and cause duplicate consumption
        kafkaConsumer.commitAsync(Collections.singletonMap(partition, new OffsetAndMetadata(record.offset() + 1)), (map, e) -> {
            if (e != null) {
                logger.error(" 提交失败 offset={},e={}", record.offset(), e);
            }
        });
    }
}
 
Example #28
Source File: ProducerSpEL.java    From beam with Apache License 2.0
static void sendOffsetsToTransaction(
    Producer<?, ?> producer,
    Map<TopicPartition, OffsetAndMetadata> offsets,
    String consumerGroupId) {
  ensureTransactionsSupport();
  invoke(sendOffsetsToTransactionMethod, producer, offsets, consumerGroupId);
}
 
Example #29
Source File: KafkaConsumerGroupService.java    From kafka_book_demo with Apache License 2.0
public List<PartitionAssignmentState> collectGroupAssignment(
        String group) throws ExecutionException, InterruptedException {
    DescribeConsumerGroupsResult groupResult = adminClient
            .describeConsumerGroups(Collections.singleton(group));
    ConsumerGroupDescription description =
            groupResult.all().get().get(group);

    List<TopicPartition> assignedTps = new ArrayList<>();
    List<PartitionAssignmentState> rowsWithConsumer = new ArrayList<>();
    Collection<MemberDescription> members = description.members();
    if (members != null) {
        ListConsumerGroupOffsetsResult offsetResult = adminClient
                .listConsumerGroupOffsets(group);
        Map<TopicPartition, OffsetAndMetadata> offsets = offsetResult
                .partitionsToOffsetAndMetadata().get();
        if (offsets != null && !offsets.isEmpty()) {
            String state = description.state().toString();
            if (state.equals("Stable")) {
                rowsWithConsumer = getRowsWithConsumer(description, offsets,
                        members, assignedTps, group);
            }
        }
        List<PartitionAssignmentState> rowsWithoutConsumer =
                getRowsWithoutConsumer(description, offsets,
                        assignedTps, group);
        rowsWithConsumer.addAll(rowsWithoutConsumer);
    }
    return rowsWithConsumer;
}