Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecords#partitions()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords#partitions(). Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
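Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: poll a batch, enumerate the partitions it contains with partitions(), and read each partition's slice with records(TopicPartition). The broker address, group id, and topic name below (localhost:9092, partitions-example, demo-topic) are placeholders chosen for illustration, not values taken from any of the projects listed.

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PartitionsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
        props.put("group.id", "partitions-example");         // placeholder consumer group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                // partitions() returns the set of TopicPartitions that have data in this batch
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        System.out.printf("%s-%d offset=%d value=%s%n",
                                partition.topic(), partition.partition(), record.offset(), record.value());
                    }
                }
            }
        }
    }
}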
Example 1
Source File: OffsetCommitSyncPartition.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    //do some logical processing.
                }
                long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(lastConsumedOffset + 1)));
            }
        }
    } finally {
        consumer.close();
    }
}
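Note that the offset committed for each partition is lastConsumedOffset + 1: in Kafka, the committed offset is the position of the next record the group should read, not the last record already processed. Committing inside the partition loop keeps the commit granularity at the partition level, so a failure part-way through a batch only replays partitions that had not yet been committed.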
 
Example 2
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    System.out.println("before:" + records);
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            if (now - record.timestamp() < EXPIRE_INTERVAL) {
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 3
Source File: KafkaCanalConnector.java    From canal with Apache License 2.0
@Override
public List<FlatMessage> getFlatListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer2.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<FlatMessage> flatMessages = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records) {
            String flatMessageJson = record.value();
            FlatMessage flatMessage = JSON.parseObject(flatMessageJson, FlatMessage.class);
            flatMessages.add(flatMessage);
        }

        return flatMessages;
    }
    return Lists.newArrayList();
}
 
Example 4
Source File: KafkaCanalConnector.java    From canal with Apache License 2.0
@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}
 
Example 5
Source File: KafkaDeadLetterBatchErrorHandler.java    From faster-framework-project with Apache License 2.0
@Override
public void handle(Exception thrownException, ConsumerRecords<?, ?> data) {
    thrownException.printStackTrace();
    log.error(handleLogMessage(thrownException));
    if (thrownException.getCause() instanceof MethodArgumentNotValidException) {
        return;
    }
    Set<TopicPartition> topicPartitions = data.partitions();
    log.error("send failed message to dead letter");
    for (TopicPartition topicPartition : topicPartitions) {
        List<? extends ConsumerRecord<?, ?>> list = data.records(topicPartition);
        for (ConsumerRecord<?, ?> consumerRecord : list) {
            deadLetterPublishingRecoverer.accept(consumerRecord, thrownException);
        }
    }
    log.error("send failed message to dead letter successful");
}
 
Example 6
Source File: KafkaCanalConnector.java    From canal-1.1.3 with Apache License 2.0
@Override
public List<FlatMessage> getFlatListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, String> records = kafkaConsumer2.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer2.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<FlatMessage> flatMessages = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records) {
            String flatMessageJson = record.value();
            FlatMessage flatMessage = JSON.parseObject(flatMessageJson, FlatMessage.class);
            flatMessages.add(flatMessage);
        }

        return flatMessages;
    }
    return Lists.newArrayList();
}
 
Example 7
Source File: KafkaCanalConnector.java    From canal-1.1.3 with Apache License 2.0
@Override
public List<Message> getListWithoutAck(Long timeout, TimeUnit unit) throws CanalClientException {
    waitClientRunning();
    if (!running) {
        return Lists.newArrayList();
    }

    ConsumerRecords<String, Message> records = kafkaConsumer.poll(unit.toMillis(timeout));

    currentOffsets.clear();
    for (TopicPartition topicPartition : records.partitions()) {
        currentOffsets.put(topicPartition.partition(), kafkaConsumer.position(topicPartition));
    }

    if (!records.isEmpty()) {
        List<Message> messages = new ArrayList<>();
        for (ConsumerRecord<String, Message> record : records) {
            messages.add(record.value());
        }
        return messages;
    }
    return Lists.newArrayList();
}
 
Example 8
Source File: ConsumerInterceptorTTL.java    From BigData-In-Practice with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a header keyed "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // expiration check for the message
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl was set, so no expiration check is needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 9
Source File: KafkaConnector.java    From kylin-on-parquet-v2 with Apache License 2.0
private void fillBuffer() {
    ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(100);
    List<ConsumerRecord<byte[], byte[]>> newBuffer = Lists.newLinkedList();
    for (TopicPartition topicPartition : records.partitions()) {
        newBuffer.addAll(records.records(topicPartition));
    }
    this.buffer = newBuffer;
}
 
Example 10
Source File: KafkaSpout.java    From storm_spring_boot_demo with MIT License
public void setWaitingToEmit(ConsumerRecords<K,V> consumerRecords) {
    List<ConsumerRecord<K,V>> waitingToEmitList = new LinkedList<>();
    for (TopicPartition tp : consumerRecords.partitions()) {
        waitingToEmitList.addAll(consumerRecords.records(tp));
    }
    waitingToEmit = waitingToEmitList.iterator();
}
 
Example 11
Source File: KafkaRecordsConsumer.java    From synapse with Apache License 2.0
private ImmutableMap<String, Duration> updateAndGetDurationBehind(ConsumerRecords<String, String> records) {
    for (final TopicPartition topicPartition : records.partitions()) {
        ConsumerRecord<String, String> lastRecord = getLast(records.records(topicPartition));
        final Instant lastTimestampRead = ofEpochMilli(lastRecord.timestamp());
        durationBehindHandler.update(topicPartition, lastRecord.offset(), lastTimestampRead);
    }

    return durationBehindHandler
            .getChannelDurationBehind()
            .getShardDurationsBehind();
}
 
Example 12
Source File: AbstractKafkaBasedConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Translate the Kafka consumer records if necessary and send the batch of records to destination.
 * @param records the Kafka consumer records
 * @param readTime the instant the records were successfully polled from the Kafka source
 */
protected void translateAndSendBatch(ConsumerRecords<?, ?> records, Instant readTime) {
  // iterate through each topic partition one at a time, for better isolation
  for (TopicPartition topicPartition : records.partitions()) {
    for (ConsumerRecord<?, ?> record : records.records(topicPartition)) {
      try {
        boolean partitionPaused;
        boolean sendFailure;
        synchronized (_sendFailureTopicPartitionExceptionMap) {
          partitionPaused = _autoPausedSourcePartitions.containsKey(topicPartition);
          sendFailure = _sendFailureTopicPartitionExceptionMap.containsKey(topicPartition);
        }
        if (partitionPaused || sendFailure) {
          _logger.warn("Abort sending for {}, auto-paused: {}, send failure: {}, rewind offset", topicPartition,
              partitionPaused, sendFailure);
          seekToLastCheckpoint(Collections.singleton(topicPartition));
          break;
        } else {
          DatastreamProducerRecord datastreamProducerRecord = translate(record, readTime);
          int numBytes = record.serializedKeySize() + record.serializedValueSize();
          sendDatastreamProducerRecord(datastreamProducerRecord, topicPartition, numBytes, null);
        }
      } catch (Exception e) {
        _logger.warn(String.format("Got exception while sending record %s, exception: ", record), e);
        rewindAndPausePartitionOnException(topicPartition, e);
        // skip other messages for this partition, but can continue processing other partitions
        break;
      }
    }
  }
}
 
Example 13
Source File: KafkaConnector.java    From kylin with Apache License 2.0
private void fillBuffer() {
    ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(100);
    List<ConsumerRecord<byte[], byte[]>> newBuffer = Lists.newLinkedList();
    for (TopicPartition topicPartition : records.partitions()) {
        newBuffer.addAll(records.records(topicPartition));
    }
    this.buffer = newBuffer;
}
 
Example 14
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a header keyed "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // expiration check for the message
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl was set, so no expiration check is needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
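In Examples 8 and 14 the producer is assumed to attach a "ttl" header carrying the message's time-to-live in seconds (decoded with the project's BytesUtils helper), which is why the expiry test compares now - record.timestamp(), in milliseconds, against ttl * 1000. Records without the header keep the default ttl of -1 and are passed through unchanged, while expired records are silently dropped from the returned batch.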
 
Example 15
Source File: KafkaMessageConsumer.java    From tcc-transaction with Apache License 2.0
@Override
public void afterPropertiesSet() throws Exception {

    Properties props = new Properties();
    props.put("bootstrap.servers", servers);
    props.put("group.id", group);
    props.put("client.id",client);
    props.put("enable.auto.commit", "false");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "io.anyway.galaxy.message.serialization.TransactionMessageDeserializer");

    consumer = new KafkaConsumer<String, TransactionMessage>(props);
    // subscribe to the topic used for transaction messages
    consumer.subscribe(Arrays.asList("galaxy-tx-message"));

    if (logger.isInfoEnabled()) {
        logger.info("create kafka consumer: " + consumer + ", subscribe topic: galaxy-tx-message");
    }

    final Thread thread= new Thread(){
        @Override
        public void run() {
            for (; running; ) {
                try {
                    ConsumerRecords<String, TransactionMessage> records = consumer.poll(timeout);
                    for (TopicPartition partition : records.partitions()) {
                        List<ConsumerRecord<String, TransactionMessage>> partitionRecords = records.records(partition);
                        for (ConsumerRecord<String, TransactionMessage> each : partitionRecords) {
                            if(logger.isInfoEnabled()){
                                logger.info("kafka receive message: "+"{topic:"+each.topic()+",partition:"+partition.partition()+",offset:"+each.offset()+",value:"+each.value()+"}");
                            }
                            if (transactionMessageService.isValidMessage(each.value())) {
                                transactionMessageService.asyncHandleMessage(each.value());
                            }
                        }
                        // commit the consumed offset synchronously
                        long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1));
                        consumer.commitSync(offsets);
                        if (logger.isInfoEnabled()) {
                            logger.info("application group: " + group + " has committed offset: " + offsets);
                        }
                    }
                } catch (Throwable e) {
                    logger.error("Consumer message failed ", e);
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e1) {
                        // e1.printStackTrace();
                    }
                }
            }
        }
    };
    thread.setDaemon(true);
    thread.start();
}
 
Example 16
Source File: KafkaMessageConsumer.java    From galaxy with Apache License 2.0
@Override
public void afterPropertiesSet() throws Exception {

    Properties props = new Properties();
    props.put("bootstrap.servers", servers);
    props.put("group.id", group);
    props.put("client.id",client);
    props.put("enable.auto.commit", "false");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "io.anyway.galaxy.message.serialization.TransactionMessageDeserializer");

    consumer = new KafkaConsumer<String, TransactionMessage>(props);
    // subscribe to the topic used for transaction messages
    consumer.subscribe(Arrays.asList("galaxy-tx-message"));

    if (logger.isInfoEnabled()) {
        logger.info("create kafka consumer: " + consumer + ", subscribe topic: galaxy-tx-message");
    }

    final Thread thread= new Thread(){
        @Override
        public void run() {
            for (; running; ) {
                try {
                    ConsumerRecords<String, TransactionMessage> records = consumer.poll(timeout);
                    for (TopicPartition partition : records.partitions()) {
                        List<ConsumerRecord<String, TransactionMessage>> partitionRecords = records.records(partition);
                        for (ConsumerRecord<String, TransactionMessage> each : partitionRecords) {
                            if(logger.isInfoEnabled()){
                                logger.info("kafka receive message: "+"{topic:"+each.topic()+",partition:"+partition.partition()+",offset:"+each.offset()+",value:"+each.value()+"}");
                            }
                            if (transactionMessageService.isValidMessage(each.value())) {
                                transactionMessageService.asyncHandleMessage(each.value());
                            }
                        }
                        // commit the consumed offset synchronously
                        long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1));
                        consumer.commitSync(offsets);
                        if (logger.isInfoEnabled()) {
                            logger.info("application group: " + group + " has committed offset: " + offsets);
                        }
                    }
                } catch (Throwable e) {
                    logger.error("Consumer message failed ", e);
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e1) {
                        // e1.printStackTrace();
                    }
                }
            }
        }
    };
    thread.setDaemon(true);
    thread.start();
}
 
Example 17
Source File: TracingConsumerInterceptor.java    From brave-kafka-interceptor with Apache License 2.0
@Override public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
  if (records.isEmpty() || tracing.isNoop()) return records;
  Map<String, Span> consumerSpansForTopic = new LinkedHashMap<>();
  for (TopicPartition partition : records.partitions()) {
    String topic = partition.topic();
    List<ConsumerRecord<K, V>> recordsInPartition = records.records(partition);
    for (ConsumerRecord<K, V> record : recordsInPartition) {
      TraceContextOrSamplingFlags extracted = extractor.extract(record.headers());
      // If we extracted neither a trace context nor request-scoped data (extra),
      // make or reuse a span for this topic
      if (extracted.samplingFlags() != null && extracted.extra().isEmpty()) {
        Span consumerSpanForTopic = consumerSpansForTopic.get(topic);
        if (consumerSpanForTopic == null) {
          consumerSpansForTopic.put(topic,
            consumerSpanForTopic = tracing.tracer()
              .nextSpan(extracted)
              .name(SPAN_NAME)
              .kind(Span.Kind.CONSUMER)
              .remoteServiceName(remoteServiceName)
              .tag(KafkaInterceptorTagKey.KAFKA_TOPIC, topic)
              .tag(KafkaInterceptorTagKey.KAFKA_GROUP_ID,
                configuration.getString(ConsumerConfig.GROUP_ID_CONFIG))
              .tag(KafkaInterceptorTagKey.KAFKA_CLIENT_ID,
                configuration.getString(ConsumerConfig.CLIENT_ID_CONFIG))
              .start());
        }
        // no need to remove propagation headers as we failed to extract anything
        injector.inject(consumerSpanForTopic.context(), record.headers());
      } else { // we extracted request-scoped data, so we cannot share a consumer span
        Span span = tracing.tracer().nextSpan(extracted);
        if (!span.isNoop()) {
          span.name(SPAN_NAME)
            .kind(Span.Kind.CONSUMER)
            .remoteServiceName(remoteServiceName)
            .tag(KafkaInterceptorTagKey.KAFKA_TOPIC, topic)
            .tag(KafkaInterceptorTagKey.KAFKA_GROUP_ID,
              configuration.getString(ConsumerConfig.GROUP_ID_CONFIG))
            .tag(KafkaInterceptorTagKey.KAFKA_CLIENT_ID,
              configuration.getString(ConsumerConfig.CLIENT_ID_CONFIG))
            .start()
            .finish(); // span won't be shared by other records
        }
        // remove prior propagation headers from the record
        tracing.propagation().keys().forEach(key -> record.headers().remove(key));
        injector.inject(span.context(), record.headers());
      }
    }
  }
  consumerSpansForTopic.values().forEach(Span::finish);
  return records;
}
 
Example 18
Source File: TransactionalWordCount.java    From tutorials with MIT License
public static void main(String[] args) {

    KafkaConsumer<String, String> consumer = createKafkaConsumer();
    KafkaProducer<String, String> producer = createKafkaProducer();

    producer.initTransactions();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(ofSeconds(60));

            Map<String, Integer> wordCountMap = records.records(new TopicPartition(INPUT_TOPIC, 0))
                    .stream()
                    .flatMap(record -> Stream.of(record.value().split(" ")))
                    .map(word -> Tuple.of(word, 1))
                    .collect(Collectors.toMap(tuple -> tuple.getKey(), t1 -> t1.getValue(), (v1, v2) -> v1 + v2));

            producer.beginTransaction();

            wordCountMap.forEach((key, value) -> producer.send(new ProducerRecord<String, String>(OUTPUT_TOPIC, key, value.toString())));

            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();

            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionedRecords = records.records(partition);
                long offset = partitionedRecords.get(partitionedRecords.size() - 1).offset();

                offsetsToCommit.put(partition, new OffsetAndMetadata(offset + 1));
            }

            producer.sendOffsetsToTransaction(offsetsToCommit, CONSUMER_GROUP_ID);
            producer.commitTransaction();
        }
    } catch (KafkaException e) {
        producer.abortTransaction();
    }
}
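Unlike the earlier examples, this one never calls commitSync on the consumer. Instead, producer.sendOffsetsToTransaction adds the consumed offsets to the producer's transaction, so the offsets and the produced word counts are committed (or aborted on KafkaException) atomically, which is the standard consume-transform-produce pattern for exactly-once processing.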