Java Code Examples for org.apache.kafka.clients.consumer.CommitFailedException

The following examples show how to use org.apache.kafka.clients.consumer.CommitFailedException. They are extracted from open-source projects. You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also want to check out the right sidebar, which shows related API usage.
Example 1
Source Project: common-kafka   Source File: ProcessingKafkaConsumer.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Commits every offset that is currently eligible for commit.
 * <p>
 * Becomes a no-op once a commit has failed; commits stay paused until the
 * next {@code poll()} re-enables them.
 *
 * @throws KafkaException
 *          if there is an issue committing offsets to Kafka
 */
public synchronized void commitOffsets() {
    if (pauseCommit) {
        LOGGER.debug("Commits are paused until we poll() again");
        return;
    }

    LOGGER.debug("committing offsets");
    try {
        commitOffsets(getCommittableOffsets());
    } catch (CommitFailedException commitFailure) {
        // After a failed commit the consumer must poll() before committing again,
        // so pause commits and let the caller see the failure.
        LOGGER.debug("Failed to commit offsets, pausing commits until next poll", commitFailure);
        pauseCommit = true;
        throw commitFailure;
    }
}
 
Example 2
Source Project: ja-micro   Source File: OffsetCommitter.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Re-commits tracked offsets for partitions whose last commit is older than
 * {@code IDLE_DURATION}, refreshing them with the broker. Runs only when this
 * committer itself has been idle past the same threshold.
 */
public void recommitOffsets() {
    LocalDateTime now = LocalDateTime.now(clock);
    if (now.isAfter(lastUpdateTime.plus(IDLE_DURATION))) {
        for (TopicPartition tp : offsetData.keySet()) {
            OffsetAndTime offsetAndTime = offsetData.get(tp);
            if (now.isAfter(offsetAndTime.time.plus(IDLE_DURATION))) {
                try {
                    consumer.commitSync(Collections.singletonMap(tp,
                            new OffsetAndMetadata(offsetAndTime.offset)));
                } catch (CommitFailedException e) {
                    // Commit failures are tolerated (e.g. during a rebalance); log with the
                    // exception as the SLF4J last argument so the stack trace is not lost.
                    logger.info("Caught CommitFailedException attempting to commit {} {}",
                            tp, offsetAndTime.offset, e);
                }
                // Record the attempt time even on failure so we don't hot-loop retries.
                offsetAndTime.time = now;
            }
        }
        lastUpdateTime = now;
    }
}
 
Example 3
Source Project: ad   Source File: MyConsumer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Consumes messages with auto-commit disabled, manually committing offsets
 * synchronously after each poll. Stops (and closes the consumer) after a
 * record with value "done" has been seen.
 */
private static void generalConsumerMessageSyncCommit() {
    // BUG FIX: the correct consumer config key is "enable.auto.commit";
    // "auto.commit.offset" is not a recognized Kafka property, so auto-commit
    // was silently left at its default (enabled) and the manual commitSync()
    // below was redundant.
    properties.put("enable.auto.commit", "false");
    consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Collections.singleton("kafka-topic"));
    try {
        while (true) {
            boolean flag = true;
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                log.debug(String.format("topic = %s, partition = %s, key = %s, value = %s",
                        record.topic(), record.partition(), record.key(), record.value())
                );
                // "done" marks the end of the stream; finish this batch, then exit.
                if (StringUtils.endsWithIgnoreCase("done", record.value())) {
                    flag = false;
                }
            }
            try {
                // Synchronous commit: blocks the current thread and retries internally
                // until it succeeds or throws CommitFailedException.
                consumer.commitSync();
            } catch (CommitFailedException e) {
                log.error("commit failed error: {}", e.getMessage());
            }
            if (!flag) {
                break;
            }
        }
    } finally {
        consumer.close();
    }
}
 
Example 4
Source Project: ja-micro   Source File: KafkaSubscriberTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Exercises the subscriber's handling of a CommitFailedException thrown by the
 * underlying consumer while committing (e.g. after losing its partition
 * assignment); consumeMessages() must survive the failure.
 */
@Test
public void subscriberLosesPartitionAssignment() {
    KafkaSubscriber<String> subscriber = new KafkaSubscriber<>(new MessageCallback(),
            "topic", "groupId", false,
            KafkaSubscriber.OffsetReset.Earliest, 1, 1, 1,
            5000, 5000, KafkaSubscriber.QueueType.OffsetBlocking, 1000);
    // Feed two messages into each of partitions 0 and 1, in offset order.
    for (int partition = 0; partition <= 1; partition++) {
        for (int offset = 1; offset <= 2; offset++) {
            subscriber.consume(new KafkaTopicInfo("topic", partition, offset, null));
        }
    }
    KafkaConsumer realConsumer = mock(KafkaConsumer.class);
    // Fail any commitSync() whose committed offset is 3.
    ArgumentMatcher<Map<TopicPartition, OffsetAndMetadata>> commitsOffsetThree =
            arg -> arg.values().iterator().next().offset() == 3;
    doThrow(new CommitFailedException()).when(realConsumer).commitSync(argThat(commitsOffsetThree));
    subscriber.realConsumer = realConsumer;
    subscriber.offsetCommitter = new OffsetCommitter(realConsumer, Clock.systemUTC());
    subscriber.consumeMessages();
}
 
Example 5
Source Project: datacollector   Source File: BaseKafkaConsumer09.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void commit() {
  // Commit and poll are serialized on the same mutex so offsets are never
  // committed concurrently with a poll.
  synchronized (pollCommitMutex) {
    // While rebalancing there is no point for us to commit offset since it's not allowed operation
    if(rebalanceInProgress.get()) {
      LOG.debug("Kafka is rebalancing, not commiting offsets");
      return;
    }

    // A prior CommitFailedException (or similar) flagged that poll() must run
    // before any further commit is legal; skip until that happens.
    if(needToCallPoll.get()) {
      LOG.debug("Waiting on poll to be properly called before continuing.");
      return;
    }

    try {
      if(topicPartitionToOffsetMetadataMap.isEmpty()) {
        LOG.debug("Skipping committing offsets since we haven't consume anything.");
        return;
      }

      LOG.debug("Committing offsets: {}", topicPartitionToOffsetMetadataMap.toString());
      kafkaConsumer.commitSync(topicPartitionToOffsetMetadataMap);
    } catch(CommitFailedException ex) {
      LOG.warn("Can't commit offset to Kafka: {}", ex.toString(), ex);
      // After CommitFailedException we MUST call consumer's poll() method first
      needToCallPoll.set(true);
      // The consumer thread might be stuck on writing to the queue, so we need to clean it up to unblock that thread
      recordQueue.clear();
    } finally {
      // either we've committed the offsets (so now we drop them so that we don't re-commit anything)
      // or CommitFailedException was thrown, in which case poll needs to be called again and they are invalid
      // (note: this also runs on the empty-map early return above, where clearing is a no-op)
      topicPartitionToOffsetMetadataMap.clear();
    }
  }
}
 
Example 6
Source Project: secor   Source File: SecorKafkaMessageIterator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Synchronously commits the given offset for the given topic/partition to
 * Kafka. A commit that fails (e.g. because the group is rebalancing) is
 * logged at trace level and otherwise ignored.
 */
@Override
public void commit(com.pinterest.secor.common.TopicPartition topicPartition, long offset) {
    TopicPartition target =
            new TopicPartition(topicPartition.getTopic(), topicPartition.getPartition());
    try {
        LOG.info("committing {} offset {} to kafka", topicPartition, offset);
        mKafkaConsumer.commitSync(ImmutableMap.of(target, new OffsetAndMetadata(offset)));
    } catch (CommitFailedException e) {
        LOG.trace("kafka commit failed due to group re-balance", e);
    }
}
 
Example 7
Source Project: rest-utils   Source File: KafkaExceptionMapperTest.java    License: Apache License 2.0 4 votes vote down vote up
// Verifies that KafkaExceptionMapper maps each category of Kafka exception to
// the expected HTTP status and error code.
@Test
public void testKafkaExceptions() {
  //exceptions mapped in KafkaExceptionMapper
  verifyMapperResponse(new BrokerNotAvailableException("some message"), Status.SERVICE_UNAVAILABLE,
      BROKER_NOT_AVAILABLE_ERROR_CODE);

  // Client-side request problems map to 400 Bad Request.
  verifyMapperResponse(new InvalidReplicationFactorException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new SecurityDisabledException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new UnsupportedVersionException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidPartitionsException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidRequestException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new UnknownServerException("some message"),Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  // Unknown topic/partition is the one 404 case.
  verifyMapperResponse(new UnknownTopicOrPartitionException("some message"), Status.NOT_FOUND,
      KAFKA_UNKNOWN_TOPIC_PARTITION_CODE);
  verifyMapperResponse(new PolicyViolationException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new TopicExistsException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidConfigurationException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);

  //test couple of retriable exceptions
  verifyMapperResponse(new NotCoordinatorException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_RETRIABLE_ERROR_ERROR_CODE);
  verifyMapperResponse(new NotEnoughReplicasException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_RETRIABLE_ERROR_ERROR_CODE);

  //test couple of kafka exception
  verifyMapperResponse(new CommitFailedException(), Status.INTERNAL_SERVER_ERROR,
      KAFKA_ERROR_ERROR_CODE);
  verifyMapperResponse(new ConcurrentTransactionsException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_ERROR_ERROR_CODE);

  //test few general exceptions
  // Non-Kafka exceptions fall through to a generic 500 with the raw status code.
  verifyMapperResponse(new NullPointerException("some message"), Status.INTERNAL_SERVER_ERROR,
      Status.INTERNAL_SERVER_ERROR.getStatusCode());
  verifyMapperResponse(new IllegalArgumentException("some message"), Status.INTERNAL_SERVER_ERROR,
      Status.INTERNAL_SERVER_ERROR.getStatusCode());
}