Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#timestamp()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#timestamp(). Each example comes from an open-source project; the originating source file and license are noted above each snippet.
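Before the project-specific examples, here is a minimal, self-contained sketch of reading the timestamp from consumed records. The broker address, group id, and topic name are placeholders, not taken from any of the projects below:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class TimestampPrinter {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "timestamp-demo");          // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // timestamp() returns epoch millis; timestampType() tells whether it is
                // CREATE_TIME, LOG_APPEND_TIME, or NO_TIMESTAMP_TYPE.
                System.out.printf("offset=%d timestamp=%d type=%s%n",
                        record.offset(), record.timestamp(), record.timestampType());
            }
        }
    }
}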
Example 1
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    System.out.println("before:" + records);
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            if (now - record.timestamp() < EXPIRE_INTERVAL) {
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 2
Source File: SecorKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public Message next() {
    if (mRecordsBatch.isEmpty()) {
        mKafkaConsumer.poll(Duration.ofSeconds(mPollTimeout)).forEach(mRecordsBatch::add);
    }

    if (mRecordsBatch.isEmpty()) {
        return null;
    } else {
        ConsumerRecord<byte[], byte[]> consumerRecord = mRecordsBatch.pop();
        List<MessageHeader> headers = new ArrayList<>();
        consumerRecord.headers().forEach(header -> headers.add(new MessageHeader(header.key(), header.value())));
        return new Message(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(),
                consumerRecord.key(), consumerRecord.value(), consumerRecord.timestamp(), headers);
    }
}
 
Example 3
Source File: KafkaConsumer10.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 4
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?,?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord consumerRecord : consumerRecords) {
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
 
Example 5
Source File: BaseKafkaConsumer11.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 6
Source File: ConsumerInterceptorTTL.java    From kafka_book_demo with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a Header with the key "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // message expiry check
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl header set, so no expiry check needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 7
Source File: KafkaConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put("kafka-origin", _srcConnString.toString());
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put("kafka-origin-partition", partitionStr);
  String offsetStr = String.valueOf(fromKafka.offset());
  metadata.put("kafka-origin-offset", offsetStr);

  long eventsSourceTimestamp = readTime.toEpochMilli();
  if (fromKafka.timestampType() == TimestampType.CREATE_TIME) {
    // If the Kafka record carries the create time, we store the event creation time as the event timestamp
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
  } else if (fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME) {
    // If the Kafka record carries the log append time, we use that as the event source timestamp,
    // which will be used to calculate the SLA.
    metadata.put(BrooklinEnvelopeMetadataConstants.SOURCE_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(readTime.toEpochMilli()));
    eventsSourceTimestamp = fromKafka.timestamp();
  }

  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setPartition(partition); // assume source partition count is same as dest
  builder.setSourceCheckpoint(partitionStr + "-" + offsetStr);

  return builder.build();
}
 
Example 8
Source File: KafkaMirrorMakerConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  long eventsSourceTimestamp =
      fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME ? fromKafka.timestamp() : readTime.toEpochMilli();
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put(KAFKA_ORIGIN_CLUSTER, _mirrorMakerSource.getBrokerListString());
  String topic = fromKafka.topic();
  metadata.put(KAFKA_ORIGIN_TOPIC, topic);
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put(KAFKA_ORIGIN_PARTITION, partitionStr);
  long offset = fromKafka.offset();
  String offsetStr = String.valueOf(offset);
  metadata.put(KAFKA_ORIGIN_OFFSET, offsetStr);
  metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(eventsSourceTimestamp));
  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setSourceCheckpoint(new KafkaMirrorMakerCheckpoint(topic, partition, offset).toString());
  builder.setDestination(_datastreamTask.getDatastreamDestination()
      .getConnectionString()
      .replace(KafkaMirrorMakerConnector.MM_TOPIC_PLACEHOLDER,
          StringUtils.isBlank(_destinationTopicPrefix) ? topic : _destinationTopicPrefix + topic));
  if (_isIdentityMirroringEnabled) {
    builder.setPartition(partition);
  }
  return builder.build();
}
 
Example 9
Source File: KafkaInput.java    From envelope with Apache License 2.0
@Override
public Row call(ConsumerRecord record) {
  return new RowWithSchema(
      getProvidingSchema(),
      record.key(),
      record.value(),
      record.timestamp(),
      record.topic(),
      record.partition(),
      record.offset());
}
 
Example 10
Source File: DefaultWebKafkaConsumer.java    From kafka-webview with MIT License
private List<KafkaResult> consume() {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    final Iterator<ConsumerRecord> recordIterator = consumerRecords.iterator();
    while (recordIterator.hasNext()) {
        // Get next record
        final ConsumerRecord consumerRecord = recordIterator.next();

        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit();
    return kafkaResultList;
}
 
Example 11
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding, final RecordSetWriter recordWriter) {
    this.initialOffset = initialRecord.offset();
    this.initialTimestamp = initialRecord.timestamp();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.recordWriter = recordWriter;
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 12
Source File: ConsumerInterceptorTTL.java    From BigData-In-Practice with Apache License 2.0
@Override
public ConsumerRecords<String, String> onConsume(
        ConsumerRecords<String, String> records) {
    long now = System.currentTimeMillis();
    Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords
            = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
        List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
        for (ConsumerRecord<String, String> record : tpRecords) {
            Headers headers = record.headers();
            long ttl = -1;
            for (Header header : headers) { // check whether the headers contain a Header with the key "ttl"
                if (header.key().equalsIgnoreCase("ttl")) {
                    ttl = BytesUtils.bytesToLong(header.value());
                }
            }
            // message expiry check
            if (ttl > 0 && now - record.timestamp() < ttl * 1000) {
                newTpRecords.add(record);
            } else if (ttl < 0) { // no ttl header set, so no expiry check needed
                newTpRecords.add(record);
            }
        }
        if (!newTpRecords.isEmpty()) {
            newRecords.put(tp, newTpRecords);
        }
    }
    return new ConsumerRecords<>(newRecords);
}
 
Example 13
Source File: ConsumerRecordConverter.java    From beast with Apache License 2.0
public List<Record> convert(final Iterable<ConsumerRecord<byte[], byte[]>> messages) throws InvalidProtocolBufferException {
    ArrayList<Record> records = new ArrayList<>();
    for (ConsumerRecord<byte[], byte[]> message : messages) {
        byte[] value = message.value();
        Map<String, Object> columns = rowMapper.map(parser.parse(value));
        OffsetInfo offsetInfo = new OffsetInfo(message.topic(), message.partition(), message.offset(), message.timestamp());
        addMetadata(columns, offsetInfo);
        records.add(new Record(offsetInfo, columns));
    }
    return records;
}
 
Example 14
Source File: ConsumerSpEL.java    From DataflowTemplates with Apache License 2.0
public long getRecordTimestamp(ConsumerRecord<byte[], byte[]> rawRecord) {
  if (hasRecordTimestamp) {
    return rawRecord.timestamp();
  }
  return -1L; // This is the timestamp used in Kafka for older messages without timestamps.
}
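The hasRecordTimestamp flag is set elsewhere in ConsumerSpEL and is not shown here. One plausible way to derive it, assuming it simply reflects whether the Kafka client on the classpath exposes ConsumerRecord#timestamp() at all (pre-0.10 clients do not), is a reflection probe like this sketch, which is not the project's actual implementation:

// Hypothetical probe: true when the Kafka client on the classpath supports record timestamps.
private static boolean detectRecordTimestampSupport() {
  try {
    ConsumerRecord.class.getMethod("timestamp");
    return true;
  } catch (NoSuchMethodException e) {
    return false; // pre-0.10 client: records carry no timestamp field
  }
}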
 
Example 15
Source File: KafkaEvent.java    From mewbase with MIT License 4 votes vote down vote up
public KafkaEvent(ConsumerRecord<String, byte[]> rec) {
    eventNumber = rec.offset();
    epochMillis = rec.timestamp();
    eventBuf = rec.value();
    crc32 = EventUtils.checksum(eventBuf);
}
 
Example 16
Source File: KafkaSourceTask.java    From MirrorTool-for-Kafka-Connect with Apache License 2.0
@Override
public List<SourceRecord> poll() {
  if (logger.isDebugEnabled())
    logger.debug("{}: poll()", this);
  synchronized (stopLock) {
    if (!stop.get())
      poll.set(true);
  }
  ArrayList<SourceRecord> records = new ArrayList<>();
  if (poll.get()) {
    try {
      ConsumerRecords<byte[], byte[]> krecords = consumer.poll(Duration.ofMillis(pollTimeout));
      if (logger.isDebugEnabled())
        logger.debug("{}: Got {} records from source.", this, krecords.count());
      for (ConsumerRecord<byte[], byte[]> krecord : krecords) {
        Map<String, String> sourcePartition = Collections.singletonMap(TOPIC_PARTITION_KEY,
            krecord.topic().concat(":").concat(Integer.toString(krecord.partition())));
        Map<String, Long> sourceOffset = Collections.singletonMap(OFFSET_KEY, krecord.offset());
        String sourceTopic = krecord.topic();
        String destinationTopic = sourceTopic;
        byte[] recordKey = krecord.key();
        byte[] recordValue = krecord.value();
        long recordTimestamp = krecord.timestamp();
        if (logger.isDebugEnabled()) {
          logger.debug(
              "Task: sourceTopic:{} sourcePartition:{} sourceOffSet:{} destinationTopic:{}, key:{}, valueSize:{}",
              sourceTopic, krecord.partition(), krecord.offset(), destinationTopic, recordKey,
              krecord.serializedValueSize());
        }
        if (includeHeaders) {
          // Mapping from source type: org.apache.kafka.common.header.Headers, to
          // destination type: org.apache.kafka.connect.Headers
          Headers sourceHeaders = krecord.headers();
          ConnectHeaders destinationHeaders = new ConnectHeaders();
          for (Header header : sourceHeaders) {
            if (header != null) {
              destinationHeaders.add(header.key(), header.value(), Schema.OPTIONAL_BYTES_SCHEMA);
            }
          }
          records.add(
              new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null, Schema.OPTIONAL_BYTES_SCHEMA,
                  recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp, destinationHeaders));
        } else {
          records.add(new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null,
              Schema.OPTIONAL_BYTES_SCHEMA, recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp));
        }
      }
    } catch (WakeupException e) {
      logger.info("{}: Caught WakeupException. Probably shutting down.", this);
    }
  }
  poll.set(false);
  // If stop has been set processing, then stop the consumer.
  if (stop.get()) {
    logger.debug("{}: stop flag set during poll(), opening stopLatch", this);
    stopLatch.countDown();
  }
  if (logger.isDebugEnabled())
    logger.debug("{}: Returning {} records to connect", this, records.size());
  return records;
}
 
Example 17
Source File: ConsumerSpEL.java    From beam with Apache License 2.0
public long getRecordTimestamp(ConsumerRecord<byte[], byte[]> rawRecord) {
  if (hasRecordTimestamp) {
    return rawRecord.timestamp();
  }
  return -1L; // This is the timestamp used in Kafka for older messages without timestamps.
}
 
Example 18
Source File: MessageListenerThread.java    From core-ng-project with Apache License 2.0
private <T> void handle(String topic, MessageProcess<T> process, List<ConsumerRecord<byte[], byte[]>> records, double longProcessThresholdInNano) {
    for (ConsumerRecord<byte[], byte[]> record : records) {
        ActionLog actionLog = logManager.begin("=== message handling begin ===");
        try {
            actionLog.action("topic:" + topic);
            actionLog.context("topic", topic);
            actionLog.context("handler", process.handler.getClass().getCanonicalName());
            actionLog.track("kafka", 0, 1, 0);

            Headers headers = record.headers();
            if ("true".equals(header(headers, MessageHeaders.HEADER_TRACE))) actionLog.trace = true;
            String correlationId = header(headers, MessageHeaders.HEADER_CORRELATION_ID);
            if (correlationId != null) actionLog.correlationIds = List.of(correlationId);
            String client = header(headers, MessageHeaders.HEADER_CLIENT);
            if (client != null) actionLog.clients = List.of(client);
            String refId = header(headers, MessageHeaders.HEADER_REF_ID);
            if (refId != null) actionLog.refIds = List.of(refId);
            logger.debug("[header] refId={}, client={}, correlationId={}", refId, client, correlationId);

            String key = key(record);
            actionLog.context("key", key);

            long timestamp = record.timestamp();
            logger.debug("[message] timestamp={}", timestamp);
            long lag = actionLog.date.toEpochMilli() - timestamp;
            actionLog.stat("consumer_lag_in_ms", lag);
            checkConsumerLag(lag, longConsumerLagThresholdInMs);

            byte[] value = record.value();
            logger.debug("[message] value={}", new BytesLogParam(value));
            T message = process.mapper.fromJSON(value);
            process.validator.validate(message, false);
            process.handler.handle(key, message);
        } catch (Throwable e) {
            logManager.logError(e);
        } finally {
            long elapsed = actionLog.elapsed();
            checkSlowProcess(elapsed, longProcessThresholdInNano);
            logManager.end("=== message handling end ===");
        }
    }
}
 
Example 19
Source File: KafkaEventMessage.java    From extension-kafka with Apache License 2.0
/**
 * Construct a {@link KafkaEventMessage} based on the deserialized body, the {@code eventMessage}, of a {@link
 * ConsumerRecord} retrieved from a Kafka topic. The {@code trackingToken} is used to turn the {@code
 * eventMessage} into a {@link TrackedEventMessage}.
 *
 * @param eventMessage   the {@link EventMessage} to wrap
 * @param consumerRecord the {@link ConsumerRecord} which the given {@code eventMessage} was the body of
 * @param trackingToken  the {@link KafkaTrackingToken} defining the position of this message
 * @return the {@link KafkaEventMessage} constructed from the given {@code eventMessage}, {@code consumerRecord} and
 * {@code trackingToken}
 */
public static KafkaEventMessage from(EventMessage<?> eventMessage,
                                     ConsumerRecord<?, ?> consumerRecord,
                                     KafkaTrackingToken trackingToken) {
    return new KafkaEventMessage(
            asTrackedEventMessage(eventMessage, trackingToken),
            consumerRecord.partition(), consumerRecord.offset(), consumerRecord.timestamp()
    );
}
 
Example 20
Source File: KafkaConsumerProxy.java    From samza with Apache License 2.0
/**
 * Convert a {@link ConsumerRecord} to an {@link IncomingMessageEnvelope}. This may also execute some other custom
 * logic for each new {@link IncomingMessageEnvelope}.
 *
 * This has a protected visibility so that {@link KafkaConsumerProxy} can be extended to add special handling logic
 * for custom Kafka systems.
 *
 * @param consumerRecord {@link ConsumerRecord} from Kafka that was consumed
 * @param systemStreamPartition {@link SystemStreamPartition} corresponding to the record
 * @return {@link IncomingMessageEnvelope} corresponding to the {@code consumerRecord}
 */
protected IncomingMessageEnvelope handleNewRecord(ConsumerRecord<K, V> consumerRecord,
    SystemStreamPartition systemStreamPartition) {
  return new IncomingMessageEnvelope(systemStreamPartition, String.valueOf(consumerRecord.offset()),
      consumerRecord.key(), consumerRecord.value(), getRecordSize(consumerRecord), consumerRecord.timestamp(),
      Instant.now().toEpochMilli());
}