Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#key()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#key(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
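Before the project examples, here is a minimal, self-contained sketch of where ConsumerRecord#key() fits in a typical poll loop. The broker address, group id, and topic name are illustrative assumptions, not values taken from any project below; key() returns the deserialized key, which is null for records produced without one.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerRecordKeySketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "key-sketch-group");        // assumed group id
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // hypothetical topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // key() may be null when the producer sent the record without a key
                String key = record.key();
                System.out.printf("key=%s value=%s partition=%d offset=%d%n",
                        key, record.value(), record.partition(), record.offset());
            }
        }
    }
}

Most of the examples below follow this same pattern: poll, iterate, then read key() alongside value(), partition(), and offset().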
Example 1
Source File: BaseKafkaConsumer11.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 2
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?,?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord<?, ?> consumerRecord : consumerRecords) {
        // Convert each record to a KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
 
Example 3
Source File: KafkaConsumerHelper.java    From zerocode with Apache License 2.0
public static void readJson(List<ConsumerJsonRecord> jsonRecords,
                            Iterator recordIterator) throws IOException {
    while (recordIterator.hasNext()) {
        ConsumerRecord thisRecord = (ConsumerRecord) recordIterator.next();

        Object key = thisRecord.key();
        Object value = thisRecord.value();
        Headers headers = thisRecord.headers();
        LOGGER.info("\nRecord Key - {} , Record value - {}, Record partition - {}, Record offset - {}, Headers - {}",
                key, value, thisRecord.partition(), thisRecord.offset(), headers);

        JsonNode valueNode = objectMapper.readTree(value.toString());
        Map<String, String> headersMap = null;
        if (headers != null) {
            headersMap = new HashMap<>();
            for (Header header : headers) {
                headersMap.put(header.key(), new String(header.value()));
            }
        }
        ConsumerJsonRecord jsonRecord = new ConsumerJsonRecord(key, null, valueNode, headersMap);
        jsonRecords.add(jsonRecord);
    }
}
 
Example 4
Source File: ConsumeDataIterator.java    From oryx with Apache License 2.0
@Override
protected KeyMessage<K,V> computeNext() {
  if (iterator == null || !iterator.hasNext()) {
    try {
      long timeout = MIN_POLL_MS;
      ConsumerRecords<K, V> records;
      // back off: double the poll timeout (up to MAX_POLL_MS) until records arrive
      while ((records = consumer.poll(timeout)).isEmpty()) {
        timeout = Math.min(MAX_POLL_MS, timeout * 2);
      }
      iterator = records.iterator();
    } catch (Exception e) {
      consumer.close();
      return endOfData();
    }
  }
  ConsumerRecord<K,V> mm = iterator.next();
  return new KeyMessageImpl<>(mm.key(), mm.value());
}
 
Example 5
Source File: TopicFilter.java    From arcusplatform with Apache License 2.0
@Override
public void onMessage(ConsumerRecord<K, byte[]> record) {
   K key = record.key();
   if (key == null && !acceptNullKeys()) {
      return;
   }
   if (!acceptKey(key)) {
      return;
   }

   M payload = deserializeMessage(record.value());
   if (!acceptMessage(payload)) {
      return;
   }
   deliver(payload);
}
 
Example 6
Source File: SecorKafkaClient.java    From secor with Apache License 2.0
private Message readSingleMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer) {
    int pollAttempts = 0;
    Message message = null;
    while (pollAttempts < MAX_READ_POLL_ATTEMPTS) {
        Iterator<ConsumerRecord<byte[], byte[]>> records = kafkaConsumer.poll(Duration.ofSeconds(mPollTimeout)).iterator();
        if (!records.hasNext()) {
            pollAttempts++;
        } else {
            ConsumerRecord<byte[], byte[]> record = records.next();
            List<MessageHeader> headers = new ArrayList<>();
            record.headers().forEach(header -> headers.add(new MessageHeader(header.key(), header.value())));
            message = new Message(record.topic(), record.partition(), record.offset(), record.key(), record.value(), record.timestamp(), headers);
            break;
        }
    }

    if (message == null) {
        LOG.warn("unable to fetch message after " + MAX_READ_POLL_ATTEMPTS + " Retries");
    }
    return message;
}
 
Example 7
Source File: KafkaConsumer10.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 8
Source File: DefaultWebKafkaConsumer.java    From kafka-webview with MIT License
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
 
Example 9
Source File: KafkaEasyTransMsgConsumerImpl.java    From EasyTransaction with Apache License 2.0
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord) throws InterruptedException, ExecutionException {

    // copy every header into headerList except RETRY_COUNT
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOriginalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }

        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOriginalHeader = true;
        }
        headerList.add(next);
    }

    // add the incremented RETRY_COUNT header
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    // record the original topic the first time the message is retried
    if (!hasOriginalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // route the message to the retry topic that corresponds to the retry count
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null, consumeRecord.key(),
            consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
 
Example 10
Source File: KafkaInput.java    From envelope with Apache License 2.0
@Override
public Row call(ConsumerRecord record) {
  return new RowWithSchema(
      getProvidingSchema(),
      record.key(),
      record.value(),
      record.timestamp(),
      record.topic(),
      record.partition(),
      record.offset());
}
 
Example 11
Source File: KafkaTracing.java    From brave with Apache License 2.0
/** When no upstream context is present, lookup keys are unlikely to have been added. */
static void addTags(ConsumerRecord<?, ?> record, SpanCustomizer result) {
  if (record.key() instanceof String && !"".equals(record.key())) {
    result.tag(KafkaTags.KAFKA_KEY_TAG, record.key().toString());
  }
  result.tag(KafkaTags.KAFKA_TOPIC_TAG, record.topic());
}
 
Example 12
Source File: TypeInformationKeyValueSerializationSchema.java    From flink with Apache License 2.0
@Override
public Tuple2<K, V> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
	K key = null;
	V value = null;

	if (record.key() != null) {
		inputDeserializer.setBuffer(record.key());
		key = keySerializer.deserialize(inputDeserializer);
	}
	if (record.value() != null) {
		inputDeserializer.setBuffer(record.value());
		value = valueSerializer.deserialize(inputDeserializer);
	}
	return new Tuple2<>(key, value);
}
 
Example 13
Source File: DistributedClusterTest.java    From kop with Apache License 2.0
protected void kafkaConsumeCommitMessage(KConsumer kConsumer,
                                         int numMessages,
                                         String messageStrPrefix,
                                         List<TopicPartition> topicPartitions) {
    kConsumer.getConsumer().assign(topicPartitions);
    int i = 0;
    while (i < numMessages) {
        if (log.isDebugEnabled()) {
            log.debug("kConsumer {} start poll message: {}",
                kConsumer.getTopic() + kConsumer.getConsumerGroup(), i);
        }
        ConsumerRecords<Integer, String> records = kConsumer.getConsumer().poll(Duration.ofSeconds(1));
        for (ConsumerRecord<Integer, String> record : records) {
            Integer key = record.key();
            assertEquals(messageStrPrefix + key.toString(), record.value());

            if (log.isDebugEnabled()) {
                log.debug("Kafka consumer get message: {}, key: {} at offset {}",
                    record.key(), record.value(), record.offset());
            }
            i++;
        }
    }

    assertEquals(i, numMessages);

    try {
        kConsumer.getConsumer().commitSync(Duration.ofSeconds(1));
    } catch (Exception e) {
        log.error("Commit offset failed: ", e);
    }

    if (log.isDebugEnabled()) {
        log.debug("kConsumer {} finished poll and commit message: {}",
            kConsumer.getTopic() + kConsumer.getConsumerGroup(), i);
    }
}
 
Example 14
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
@Override
public Message<String, String> extract(final ConsumerRecord<byte[], byte[]> record) {
    String key = record.key() != null ? new String(record.key()) : null;
    String value = record.value() != null ? new String(record.value()) : null;
    return new Message<>(key, value);
}
 
Example 15
Source File: ProductListener.java    From micronaut-kafka with Apache License 2.0
@Topic("awesome-products")
public void receive(ConsumerRecord<String, Product> record) { // <1>
    Product product = record.value(); // <2>
    String brand = record.key(); // <3>
    System.out.println("Got Product - " + product.getName() + " by " + brand);
}
 
Example 16
Source File: KafkaKeyBinder.java    From micronaut-kafka with Apache License 2.0
@Override
public BindingResult<T> bind(ArgumentConversionContext<T> context, ConsumerRecord<?, ?> source) {
    Object key = source.key();
    Optional<T> converted = ConversionService.SHARED.convert(key, context);
    return () -> converted;
}
 
Example 17
Source File: KafkaConsumer09.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  return new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
}
 
Example 18
Source File: NewApiTopicConsumer.java    From jeesuite-libs with Apache License 2.0
/**
 * Dispatch a consumed record to its registered {@link MessageHandler}.
 * @param record the consumed Kafka record
 */
private void processConsumerRecords(final ConsumerRecord<String, Serializable> record) {
    // handle values that were not wrapped in a DefaultMessage
    final DefaultMessage message = record.value() instanceof DefaultMessage
            ? (DefaultMessage) record.value()
            : new DefaultMessage(record.key(), (Serializable) record.value());
    final MessageHandler messageHandler = topicHandlers.get(record.topic());

    message.setTopicMetadata(record.topic(), record.partition(), record.offset());

    consumerContext.updateConsumerStats(record.topic(), 1);
    consumerContext.saveOffsetsBeforeProcessed(record.topic(), record.partition(), record.offset() + 1);
    // phase-one processing
    messageHandler.p1Process(message);
    // phase-two processing, run asynchronously
    (message.isConsumerAckRequired() ? highProcessExecutor : defaultProcessExecutor).submit(new Runnable() {
        @Override
        public void run() {
            try {
                messageHandler.p2Process(message);
                if (!offsetAutoCommit) {
                    uncommittedOffsetMap.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                    uncommittedNums.incrementAndGet();
                }
                // send the consumer acknowledgement if one was requested
                if (message.isConsumerAckRequired()) {
                    consumerContext.sendConsumerAck(message.getMsgId());
                }
                consumerContext.saveOffsetsAfterProcessed(record.topic(), record.partition(), record.offset() + 1);
            } catch (Exception e) {
                boolean processed = messageHandler.onProcessError(message);
                if (!processed) {
                    consumerContext.processErrorMessage(record.topic(), message);
                }
                logger.error("[" + messageHandler.getClass().getSimpleName() + "] process Topic[" + record.topic() + "] error", e);
            }

            consumerContext.updateConsumerStats(record.topic(), -1);
        }
    });
}
 
Example 19
Source File: DBusConsumerRecord.java    From DBus with Apache License 2.0
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(),
            record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value());
}
 
Example 20
Source File: KafkaConsumerProxy.java    From samza with Apache License 2.0
/**
 * Protected to help extensions of this class build {@link IncomingMessageEnvelope}s.
 * @param r consumer record to size
 * @return the size of the serialized record
 */
protected int getRecordSize(ConsumerRecord<K, V> r) {
  int keySize = (r.key() == null) ? 0 : r.serializedKeySize();
  return keySize + r.serializedValueSize();
}