Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#headers()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#headers(). They are drawn from open source projects; the project and license are noted above each example.
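Before the project samples, here is a minimal orientation sketch of the headers() API itself. The logHeaders name and the "trace-id" key are placeholders invented for illustration and are not taken from any project below.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;

// Print every header on a record, then look up a single key directly.
static void logHeaders(ConsumerRecord<String, String> record) {
    Headers headers = record.headers(); // empty, not null, for records returned by poll()
    for (Header header : headers) {
        byte[] value = header.value(); // header values may legitimately be null
        System.out.println(header.key() + " = "
                + (value == null ? "null" : new String(value, StandardCharsets.UTF_8)));
    }
    Header traceId = headers.lastHeader("trace-id"); // null if the key was never set
    if (traceId != null && traceId.value() != null) {
        System.out.println("trace-id = " + new String(traceId.value(), StandardCharsets.UTF_8));
    }
}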
Example 1
Source File: ConsumerRecordEntryPointInterceptor.java    From pinpoint with Apache License 2.0
@Override
public Trace createTrace(TraceContext traceContext, ConsumerRecord consumerRecord) {
    org.apache.kafka.common.header.Headers headers = consumerRecord.headers();
    if (headers == null) {
        return null;
    }

    if (!isSampled(headers)) {
        // Even if this transaction is not a sampling target, we have to create a Trace object to mark it as 'not sampling'.
        // For example, if this transaction invokes an rpc call, we can add a parameter to tell the remote node 'don't sample this transaction'.
        final Trace trace = traceContext.disableSampling();
        if (isDebug) {
            logger.debug("remotecall sampling flag found. skip trace");
        }
        return trace;
    }

    TraceId traceId = populateTraceIdFromHeaders(traceContext, headers);
    if (traceId != null) {
        return createContinueTrace(traceContext, consumerRecord, traceId);
    } else {
        return createTrace0(traceContext, consumerRecord);
    }
}
 
Example 2
Source File: KafkaHeaderBinder.java    From micronaut-kafka with Apache License 2.0
@Override
public BindingResult<T> bind(ArgumentConversionContext<T> context, ConsumerRecord<?, ?> source) {
    Headers headers = source.headers();
    AnnotationMetadata annotationMetadata = context.getAnnotationMetadata();

    String name = annotationMetadata.getValue(Header.class, "name", String.class)
                                    .orElseGet(() -> annotationMetadata.getValue(Header.class, String.class)
                                                                       .orElse(context.getArgument().getName()));
    Iterable<org.apache.kafka.common.header.Header> value = headers.headers(name);

    if (value.iterator().hasNext()) {
        Optional<T> converted = ConversionService.SHARED.convert(value, context);
        return () -> converted;
    } else if (context.getArgument().getType() == Optional.class) {
        //noinspection unchecked
        return () -> (Optional<T>) Optional.of(Optional.empty());
    } else {
        //noinspection unchecked
        return BindingResult.EMPTY;
    }
}
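As a usage note, the binder above is what allows a @KafkaListener method to receive individual record headers as parameters. The following hypothetical listener is a sketch only: the class, topic, and header names are invented, and it assumes the Micronaut 1.x/2.x io.micronaut.messaging.annotation.Header annotation that this binder reads.

import io.micronaut.configuration.kafka.annotation.KafkaKey;
import io.micronaut.configuration.kafka.annotation.KafkaListener;
import io.micronaut.configuration.kafka.annotation.Topic;
import io.micronaut.messaging.annotation.Header;

@KafkaListener(groupId = "demo-group")
public class OrderListener {

    // The "X-Request-Id" header of each ConsumerRecord is bound to the annotated parameter.
    @Topic("orders")
    public void receive(@KafkaKey String key, String body, @Header("X-Request-Id") String requestId) {
        System.out.println("key=" + key + ", requestId=" + requestId + ", body=" + body);
    }
}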
 
Example 3
Source File: KafkaConsumerHelper.java    From zerocode with Apache License 2.0
public static void readJson(List<ConsumerJsonRecord> jsonRecords,
                            Iterator recordIterator) throws IOException {
    while (recordIterator.hasNext()) {
        ConsumerRecord thisRecord = (ConsumerRecord) recordIterator.next();

        Object key = thisRecord.key();
        Object value = thisRecord.value();
        Headers headers = thisRecord.headers();
        LOGGER.info("\nRecord Key - {} , Record value - {}, Record partition - {}, Record offset - {}, Headers - {}",
                key, value, thisRecord.partition(), thisRecord.offset(), headers);

        JsonNode valueNode = objectMapper.readTree(value.toString());
        Map<String, String> headersMap = null;
        if (headers != null) {
            headersMap = new HashMap<>();
            for (Header header : headers) {
                headersMap.put(header.key(), new String(header.value()));
            }
        }
        ConsumerJsonRecord jsonRecord = new ConsumerJsonRecord(thisRecord.key(), null, valueNode, headersMap);
        jsonRecords.add(jsonRecord);
    }
}
 
Example 4
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private void _testByStoringSchemaIdInHeaderOrPayload(String topicName, Object msg, boolean storeSchemaIdInHeader) throws InterruptedException {
    createTopic(topicName);
    try {
        String bootstrapServers = produceMessage(topicName, msg, storeSchemaIdInHeader);

        String consumerGroup = topicName + "-group-" + new Random().nextLong();
        ConsumerRecords<String, Object> consumerRecords = consumeMessage(topicName, bootstrapServers, consumerGroup);

        Assert.assertEquals(1, consumerRecords.count());

        ConsumerRecord<String, Object> consumerRecord = consumerRecords.iterator().next();
        final Headers headers = consumerRecord.headers();
        Assert.assertEquals(storeSchemaIdInHeader, headers.lastHeader(KafkaAvroSerde.DEFAULT_KEY_SCHEMA_VERSION_ID) != null);
        Assert.assertEquals(storeSchemaIdInHeader, headers.lastHeader(KafkaAvroSerde.DEFAULT_VALUE_SCHEMA_VERSION_ID) != null);

        Object value = consumerRecord.value();
        Assert.assertEquals(getKey(msg), consumerRecord.key());
        AvroSchemaRegistryClientUtil.assertAvroObjs(msg, value);
    } finally {
        CLUSTER.deleteTopicAndWait(topicName);
    }
}
 
Example 5
Source File: KafkaRecordsStorage.java    From liiklus with MIT License
private Envelope toEnvelope(String topic, ConsumerRecord<ByteBuffer, ByteBuffer> record) {
    var headers = new HashMap<String, String>();
    for (Header header : record.headers()) {
        headers.put(header.key(), new String(header.value()));
    }

    return toEnvelope(
            topic,
            record.key(),
            record.value(),
            headers
    );
}
 
Example 6
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private Map<String, String> getAttributes(final ConsumerRecord<?, ?> consumerRecord) {
    final Map<String, String> attributes = new HashMap<>();
    if (headerNamePattern == null) {
        return attributes;
    }

    for (final Header header : consumerRecord.headers()) {
        final String attributeName = header.key();
        final byte[] attributeValue = header.value();
        if (headerNamePattern.matcher(attributeName).matches() && attributeValue != null) {
            attributes.put(attributeName, new String(attributeValue, headerCharacterSet));
        }
    }

    return attributes;
}
 
Example 7
Source File: KafkaRecordCoder.java    From DataflowTemplates with Apache License 2.0
private Object toHeaders(Iterable<KV<String, byte[]>> records) {
  if (!ConsumerSpEL.hasHeaders) {
    return null;
  }

  // A throwaway ConsumerRecord is used simply to create a list of headers
  ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>("", 0, 0L, "", "");
  records.forEach(kv -> consumerRecord.headers().add(kv.getKey(), kv.getValue()));
  return consumerRecord.headers();
}
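The toHeaders helper above goes through a throwaway ConsumerRecord because that constructor initializes an empty, mutable Headers instance. For comparison only, and assuming a direct use of the (nominally internal) RecordHeaders class is acceptable in your build, the same result can be sketched without the dummy record; the method name toHeadersDirect is invented.

import org.apache.beam.sdk.values.KV;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

// Build the mutable Headers container directly instead of via a dummy ConsumerRecord.
private Headers toHeadersDirect(Iterable<KV<String, byte[]>> records) {
    Headers headers = new RecordHeaders();
    records.forEach(kv -> headers.add(kv.getKey(), kv.getValue()));
    return headers;
}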
 
Example 8
Source File: KafkaUtils.java    From loc-framework with MIT License
static String getMessage(byte[] value, String prefix, ConsumerRecord<?, ?> record) {
  String message = new String(value, StandardCharsets.UTF_8);
  StringBuffer sb = new StringBuffer();
  sb.append(prefix);
  sb.append("; topic is ").append(record.topic());
  sb.append("; partition is ").append(record.partition());
  sb.append("; offset is ").append(record.offset());
  if (record.headers() != null) {
    sb.append("; header is ");
    Arrays.stream(record.headers().toArray()).forEach(h -> sb.append(h.key()).append("=")
        .append(new String(h.value(), StandardCharsets.UTF_8)));
  }
  sb.append("; message is ").append(toDisplayString(message, 1024));
  return sb.toString();
}
 
Example 9
Source File: KafkaEasyTransMsgConsumerImpl.java    From EasyTransaction with Apache License 2.0
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord) throws InterruptedException, ExecutionException {

    // add all headers to headerList except RETRY_COUNT
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOrignalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }

        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOrignalHeader = true;
        }
        headerList.add(next);
    }

    // add the RETRY_COUNT header
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    if (!hasOrignalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // send the message to the corresponding retry queue according to the retry count
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null, consumeRecord.key(),
            consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
 
Example 10
Source File: DefaultKafkaMessageConverter.java    From extension-kafka with Apache License 2.0
@Override
public Optional<EventMessage<?>> readKafkaMessage(ConsumerRecord<String, byte[]> consumerRecord) {
    try {
        Headers headers = consumerRecord.headers();
        if (isAxonMessage(headers)) {
            byte[] messageBody = consumerRecord.value();
            SerializedMessage<?> message = extractSerializedMessage(headers, messageBody);
            return buildMessage(headers, message);
        }
    } catch (Exception e) {
        logger.trace("Error converting ConsumerRecord [{}] to an EventMessage", consumerRecord, e);
    }

    return Optional.empty();
}
 
Example 11
Source File: ProducerRecordCoder.java    From beam with Apache License 2.0
private Object toHeaders(Iterable<KV<String, byte[]>> records) {
  if (!ConsumerSpEL.hasHeaders()) {
    return null;
  }

  // A throwaway ConsumerRecord is used simply to create a list of headers
  ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>("", 0, 0L, "", "");
  records.forEach(kv -> consumerRecord.headers().add(kv.getKey(), kv.getValue()));
  return consumerRecord.headers();
}
 
Example 12
Source File: ConsumerInterceptorTTL.java    From BigData-In-Practice with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a Header with key "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // message expiry check
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl was set, so no expiry check is needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 13
Source File: ConsumerRecordsIteratorWrapper.java    From apm-agent-java with Apache License 2.0
@Override
public ConsumerRecord next() {
    endCurrentTransaction();
    ConsumerRecord record = delegate.next();
    try {
        String topic = record.topic();
        if (!WildcardMatcher.isAnyMatch(messagingConfiguration.getIgnoreMessageQueues(), topic)) {
            Transaction transaction = tracer.startChildTransaction(record, KafkaRecordHeaderAccessor.instance(), ConsumerRecordsIteratorWrapper.class.getClassLoader());
            if (transaction != null) {
                transaction.withType("messaging").withName("Kafka record from " + topic).activate();
                transaction.setFrameworkName(FRAMEWORK_NAME);

                Message message = transaction.getContext().getMessage();
                message.withQueue(topic);
                if (record.timestampType() == TimestampType.CREATE_TIME) {
                    message.withAge(System.currentTimeMillis() - record.timestamp());
                }

                if (transaction.isSampled() && coreConfiguration.isCaptureHeaders()) {
                    for (Header header : record.headers()) {
                        String key = header.key();
                        if (!TraceContext.TRACE_PARENT_BINARY_HEADER_NAME.equals(key) &&
                            WildcardMatcher.anyMatch(coreConfiguration.getSanitizeFieldNames(), key) == null) {
                            message.addHeader(key, header.value());
                        }
                    }
                }

                if (transaction.isSampled() && coreConfiguration.getCaptureBody() != CoreConfiguration.EventType.OFF) {
                    message.appendToBody("key=").appendToBody(String.valueOf(record.key())).appendToBody("; ")
                        .appendToBody("value=").appendToBody(String.valueOf(record.value()));
                }
            }
        }
    } catch (Exception e) {
        logger.error("Error in transaction creation based on Kafka record", e);
    }
    return record;
}
 
Example 14
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a Header with key "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // message expiry check
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl was set, so no expiry check is needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
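For context, the "ttl" header that both TTL interceptors read would be attached on the producer side. The sketch below is an assumption rather than code from either repository: the recordWithTtl helper is invented, and the long value is encoded big-endian on the premise that BytesUtils.bytesToLong decodes it the same way.

import java.nio.ByteBuffer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;

// Build a producer record carrying a "ttl" header (in seconds) that the
// interceptor can compare against the record timestamp.
public static ProducerRecord<String, String> recordWithTtl(String topic, String key, String value, long ttlSeconds) {
    byte[] ttlBytes = ByteBuffer.allocate(Long.BYTES).putLong(ttlSeconds).array();
    return new ProducerRecord<>(
            topic,
            null,                        // let the partitioner choose the partition
            System.currentTimeMillis(),  // timestamp the interceptor measures age from
            key,
            value,
            new RecordHeaders().add("ttl", ttlBytes));
}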
 
Example 15
Source File: MessageListenerThread.java    From core-ng-project with Apache License 2.0
private <T> void handle(String topic, MessageProcess<T> process, List<ConsumerRecord<byte[], byte[]>> records, double longProcessThresholdInNano) {
    for (ConsumerRecord<byte[], byte[]> record : records) {
        ActionLog actionLog = logManager.begin("=== message handling begin ===");
        try {
            actionLog.action("topic:" + topic);
            actionLog.context("topic", topic);
            actionLog.context("handler", process.handler.getClass().getCanonicalName());
            actionLog.track("kafka", 0, 1, 0);

            Headers headers = record.headers();
            if ("true".equals(header(headers, MessageHeaders.HEADER_TRACE))) actionLog.trace = true;
            String correlationId = header(headers, MessageHeaders.HEADER_CORRELATION_ID);
            if (correlationId != null) actionLog.correlationIds = List.of(correlationId);
            String client = header(headers, MessageHeaders.HEADER_CLIENT);
            if (client != null) actionLog.clients = List.of(client);
            String refId = header(headers, MessageHeaders.HEADER_REF_ID);
            if (refId != null) actionLog.refIds = List.of(refId);
            logger.debug("[header] refId={}, client={}, correlationId={}", refId, client, correlationId);

            String key = key(record);
            actionLog.context("key", key);

            long timestamp = record.timestamp();
            logger.debug("[message] timestamp={}", timestamp);
            long lag = actionLog.date.toEpochMilli() - timestamp;
            actionLog.stat("consumer_lag_in_ms", lag);
            checkConsumerLag(lag, longConsumerLagThresholdInMs);

            byte[] value = record.value();
            logger.debug("[message] value={}", new BytesLogParam(value));
            T message = process.mapper.fromJSON(value);
            process.validator.validate(message, false);
            process.handler.handle(key, message);
        } catch (Throwable e) {
            logManager.logError(e);
        } finally {
            long elapsed = actionLog.elapsed();
            checkSlowProcess(elapsed, longProcessThresholdInNano);
            logManager.end("=== message handling end ===");
        }
    }
}
 
Example 16
Source File: KafkaUnboundedReader.java    From DataflowTemplates with Apache License 2.0
@Override
public boolean advance() throws IOException {
  /* Read first record (if any). we need to loop here because :
   *  - (a) some records initially need to be skipped if they are before consumedOffset
   *  - (b) if curBatch is empty, we want to fetch next batch and then advance.
   *  - (c) curBatch is an iterator of iterators. we interleave the records from each.
   *        curBatch.next() might return an empty iterator.
   */
  while (true) {
    if (curBatch.hasNext()) {
      PartitionState<K, V> pState = curBatch.next();

      if (!pState.recordIter.hasNext()) { // -- (c)
        pState.recordIter = Collections.emptyIterator(); // drop ref
        curBatch.remove();
        continue;
      }

      elementsRead.inc();
      elementsReadBySplit.inc();

      ConsumerRecord<byte[], byte[]> rawRecord = pState.recordIter.next();
      long expected = pState.nextOffset;
      long offset = rawRecord.offset();

      if (offset < expected) { // -- (a)
        // this can happen when compression is enabled in Kafka (seems to be fixed in 0.10)
        // should we check if the offset is way off from consumedOffset (say > 1M)?
        LOG.warn(
            "{}: ignoring already consumed offset {} for {}",
            this,
            offset,
            pState.topicPartition);
        continue;
      }

      long offsetGap = offset - expected; // could be > 0 when Kafka log compaction is enabled.

      if (curRecord == null) {
        LOG.info("{}: first record offset {}", name, offset);
        offsetGap = 0;
      }

      // Apply user deserializers. User deserializers might throw, which will be propagated up
      // and 'curRecord' remains unchanged. The runner should close this reader.
      // TODO: write records that can't be deserialized to a "dead-letter" additional output.
      KafkaRecord<K, V> record =
          new KafkaRecord<>(
              rawRecord.topic(),
              rawRecord.partition(),
              rawRecord.offset(),
              consumerSpEL.getRecordTimestamp(rawRecord),
              consumerSpEL.getRecordTimestampType(rawRecord),
              ConsumerSpEL.hasHeaders ? rawRecord.headers() : null,
              keyDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.key()),
              valueDeserializerInstance.deserialize(rawRecord.topic(), rawRecord.value()));

      curTimestamp =
          pState.timestampPolicy.getTimestampForRecord(pState.mkTimestampPolicyContext(), record);
      curRecord = record;

      int recordSize =
          (rawRecord.key() == null ? 0 : rawRecord.key().length)
              + (rawRecord.value() == null ? 0 : rawRecord.value().length);
      pState.recordConsumed(offset, recordSize, offsetGap);
      bytesRead.inc(recordSize);
      bytesReadBySplit.inc(recordSize);
      return true;

    } else { // -- (b)
      nextBatch();

      if (!curBatch.hasNext()) {
        return false;
      }
    }
  }
}
 
Example 17
Source File: KafkaSourceTask.java    From MirrorTool-for-Kafka-Connect with Apache License 2.0
@Override
public List<SourceRecord> poll() {
  if (logger.isDebugEnabled())
    logger.debug("{}: poll()", this);
  synchronized (stopLock) {
    if (!stop.get())
      poll.set(true);
  }
  ArrayList<SourceRecord> records = new ArrayList<>();
  if (poll.get()) {
    try {
      ConsumerRecords<byte[], byte[]> krecords = consumer.poll(Duration.ofMillis(pollTimeout));
      if (logger.isDebugEnabled())
        logger.debug("{}: Got {} records from source.", this, krecords.count());
      for (ConsumerRecord<byte[], byte[]> krecord : krecords) {
        Map<String, String> sourcePartition = Collections.singletonMap(TOPIC_PARTITION_KEY,
            krecord.topic().concat(":").concat(Integer.toString(krecord.partition())));
        Map<String, Long> sourceOffset = Collections.singletonMap(OFFSET_KEY, krecord.offset());
        String sourceTopic = krecord.topic();
        String destinationTopic = sourceTopic;
        byte[] recordKey = krecord.key();
        byte[] recordValue = krecord.value();
        long recordTimestamp = krecord.timestamp();
        if (logger.isDebugEnabled()) {
          logger.trace(
              "Task: sourceTopic:{} sourcePartition:{} sourceOffSet:{} destinationTopic:{}, key:{}, valueSize:{}",
              sourceTopic, krecord.partition(), krecord.offset(), destinationTopic, recordKey,
              krecord.serializedValueSize());
        }
        if (includeHeaders) {
          // Mapping from the source type org.apache.kafka.common.header.Headers to
          // the destination type org.apache.kafka.connect.header.ConnectHeaders
          Headers sourceHeaders = krecord.headers();
          ConnectHeaders destinationHeaders = new ConnectHeaders();
          for (Header header : sourceHeaders) {
            if (header != null) {
              destinationHeaders.add(header.key(), header.value(), Schema.OPTIONAL_BYTES_SCHEMA);
            }
          }
          records.add(
              new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null, Schema.OPTIONAL_BYTES_SCHEMA,
                  recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp, destinationHeaders));
        } else {
          records.add(new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null,
              Schema.OPTIONAL_BYTES_SCHEMA, recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp));
        }
      }
    } catch (WakeupException e) {
      logger.info("{}: Caught WakeupException. Probably shutting down.", this);
    }
  }
  poll.set(false);
  // If stop was requested while processing, open the stop latch so the consumer can be stopped.
  if (stop.get()) {
    logger.debug("{}: stop flag set during poll(), opening stopLatch", this);
    stopLatch.countDown();
  }
  if (logger.isDebugEnabled())
    logger.debug("{}: Returning {} records to connect", this, records.size());
  return records;
}
 
Example 18
Source File: KafkaHeadersBinder.java    From micronaut-kafka with Apache License 2.0
@Override
public BindingResult<MessageHeaders> bind(ArgumentConversionContext<MessageHeaders> context, ConsumerRecord<?, ?> source) {

    KafkaHeaders kafkaHeaders = new KafkaHeaders(source.headers());
    return () -> Optional.of(kafkaHeaders);
}