Java Code Examples for org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME

The following examples show how to use org.apache.kafka.common.record.TimestampType#LOG_APPEND_TIME. Each example links back to its original project and source file.
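Before the full examples, here is a minimal sketch of the basic pattern they all build on: a consumer inspects ConsumerRecord#timestampType() to decide whether the record's timestamp is the broker's append time or the producer's create time. The bootstrap address, group id, and topic name below are placeholders, not values taken from the examples.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LogAppendTimeSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder address
    props.put("group.id", "timestamp-demo");           // placeholder group id
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
      for (ConsumerRecord<String, String> record : records) {
        if (record.timestampType() == TimestampType.LOG_APPEND_TIME) {
          // The broker assigned this timestamp when it appended the record
          // (topic configured with message.timestamp.type=LogAppendTime).
          System.out.println("Broker append time: " + record.timestamp());
        } else {
          // CREATE_TIME (producer-assigned) or NO_TIMESTAMP_TYPE
          System.out.println("Timestamp type: " + record.timestampType());
        }
      }
    }
  }
}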
Example 1
Source File: JsonPayloadFormatterTest.java    From kafka-connect-lambda with Apache License 2.0
@Test
public void testTimestampTypesSinkRecord() throws IOException {
  TimestampType[] timestampTypes = {
      TimestampType.LOG_APPEND_TIME,
      TimestampType.CREATE_TIME,
      TimestampType.NO_TIMESTAMP_TYPE
  };

  for (TimestampType t : timestampTypes) {
    final SinkRecord record = new SinkRecord(
        TEST_TOPIC,
        TEST_PARTITION,
        null,
        null,
        null,
        null,
        TEST_OFFSET,
        TEST_TIMESTAMP,
        t
    );
    final String result = formatter.format(record);
    debugShow(record, result);

    // Round-trip the formatter output back into a Payload and verify the recorded timestamp type
    Payload payload = new Payload<>();
    payload = mapper.readValue(result, payload.getClass());
    assertEquals(t.toString(), payload.getTimestampTypeName());
  }
}
 
Example 2
Source File: KafkaMirrorMakerConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  // Prefer the broker's log append time when present; otherwise fall back to the time the record was read
  long eventsSourceTimestamp =
      fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME ? fromKafka.timestamp() : readTime.toEpochMilli();
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put(KAFKA_ORIGIN_CLUSTER, _mirrorMakerSource.getBrokerListString());
  String topic = fromKafka.topic();
  metadata.put(KAFKA_ORIGIN_TOPIC, topic);
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put(KAFKA_ORIGIN_PARTITION, partitionStr);
  long offset = fromKafka.offset();
  String offsetStr = String.valueOf(offset);
  metadata.put(KAFKA_ORIGIN_OFFSET, offsetStr);
  metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(eventsSourceTimestamp));
  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setSourceCheckpoint(new KafkaMirrorMakerCheckpoint(topic, partition, offset).toString());
  builder.setDestination(_datastreamTask.getDatastreamDestination()
      .getConnectionString()
      .replace(KafkaMirrorMakerConnector.MM_TOPIC_PLACEHOLDER,
          StringUtils.isBlank(_destinationTopicPrefix) ? topic : _destinationTopicPrefix + topic));
  if (_isIdentityMirroringEnabled) {
    builder.setPartition(partition);
  }
  return builder.build();
}
 
Example 3
Source File: KafkaConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put("kafka-origin", _srcConnString.toString());
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put("kafka-origin-partition", partitionStr);
  String offsetStr = String.valueOf(fromKafka.offset());
  metadata.put("kafka-origin-offset", offsetStr);

  long eventsSourceTimestamp = readTime.toEpochMilli();
  if (fromKafka.timestampType() == TimestampType.CREATE_TIME) {
    // If the record's timestamp is the producer create time, store it as the event timestamp
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
  } else if (fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME) {
    // If the record's timestamp is the broker log append time, use it as the event source timestamp,
    // which is later used to calculate the SLA.
    metadata.put(BrooklinEnvelopeMetadataConstants.SOURCE_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(readTime.toEpochMilli()));
    eventsSourceTimestamp = fromKafka.timestamp();
  }

  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setPartition(partition); // assume source partition count is same as dest
  builder.setSourceCheckpoint(partitionStr + "-" + offsetStr);

  return builder.build();
}
 
Example 4
Source File: PulsarKafkaConsumer.java    From pulsar with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public ConsumerRecords<K, V> poll(long timeoutMillis) {
    try {
        QueueItem item = receivedMessages.poll(timeoutMillis, TimeUnit.MILLISECONDS);
        if (item == null) {
            return (ConsumerRecords<K, V>) ConsumerRecords.EMPTY;
        }

        Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new HashMap<>();

        int numberOfRecords = 0;

        while (item != null) {
            TopicName topicName = TopicName.get(item.consumer.getTopic());
            String topic = topicName.getPartitionedTopicName();
            int partition = topicName.isPartitioned() ? topicName.getPartitionIndex() : 0;
            Message<byte[]> msg = item.message;
            MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId();
            long offset = MessageIdUtils.getOffset(msgId);

            TopicPartition tp = new TopicPartition(topic, partition);
            if (lastReceivedOffset.get(tp) == null && !unpolledPartitions.contains(tp)) {
            	log.info("When polling offsets, invalid offsets were detected. Resetting topic partition {}", tp);
            	resetOffsets(tp);
            }

            K key = getKey(topic, msg);
            if (valueSchema instanceof PulsarKafkaSchema) {
                ((PulsarKafkaSchema<V>) valueSchema).setTopic(topic);
            }
            V value = valueSchema.decode(msg.getData());

            // Default to Pulsar's broker-assigned publish time, surfaced as Kafka's LOG_APPEND_TIME
            TimestampType timestampType = TimestampType.LOG_APPEND_TIME;
            long timestamp = msg.getPublishTime();

            if (msg.getEventTime() > 0) {
                // If the message carries an event time, prefer it as the record's create time
                timestamp = msg.getEventTime();
                timestampType = TimestampType.CREATE_TIME;
            }

            ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, timestamp,
                    timestampType, -1, msg.hasKey() ? msg.getKey().length() : 0, msg.getData().length, key, value);

            records.computeIfAbsent(tp, k -> new ArrayList<>()).add(consumerRecord);

            // Update last offset seen by application
            lastReceivedOffset.put(tp, offset);
            unpolledPartitions.remove(tp);

            if (++numberOfRecords >= maxRecordsInSinglePoll) {
                break;
            }

            // Check if we have an item already available
            item = receivedMessages.poll(0, TimeUnit.MILLISECONDS);
        }

        if (isAutoCommit && !records.isEmpty()) {
            // Commit the offset of previously dequeued messages
            commitAsync();
        }

        // If no interceptors are provided, the interceptors list is empty and the original ConsumerRecords is returned.
        return applyConsumerInterceptorsOnConsume(interceptors, new ConsumerRecords<>(records));
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}