Java Code Examples for org.apache.kafka.common.record.TimestampType#NO_TIMESTAMP_TYPE

The following examples show how to use org.apache.kafka.common.record.TimestampType#NO_TIMESTAMP_TYPE. They are drawn from open-source projects; each example names its source file, originating project, and license.
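
As background, TimestampType is the enum Kafka attaches to every record to describe where its timestamp came from: CREATE_TIME, LOG_APPEND_TIME, or NO_TIMESTAMP_TYPE when the record carries no usable timestamp at all (for example, records written in a pre-0.10 message format). The following is a minimal, self-contained sketch of the guard pattern many of the examples below use; the broker address, topic name, and group id are placeholders rather than values from any of the projects listed here.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.record.TimestampType;

public class TimestampGuardExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
    props.put("group.id", "timestamp-guard-example"); // placeholder group id
    props.put("key.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
      for (ConsumerRecord<String, String> record : records) {
        // NO_TIMESTAMP_TYPE means the record carries no meaningful timestamp,
        // so fall back to processing time instead of record.timestamp().
        long effectiveTimestamp =
            record.timestampType() == TimestampType.NO_TIMESTAMP_TYPE
                ? System.currentTimeMillis()
                : record.timestamp();
        System.out.printf("%s -> %d (%s)%n",
            record.value(), effectiveTimestamp, record.timestampType());
      }
    }
  }
}
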
Example 1
Source File: KafkaConsumer10.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 2
Source File: BaseKafkaConsumer11.java    From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
 
Example 3
Source File: TimestampNowTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void test() {
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      "",
      null,
      "",
      1234123L,
      12341312L,
      TimestampType.NO_TIMESTAMP_TYPE
  );
  final Long expectedTimestamp = 1537808219123L;
  TimestampNow<SinkRecord> transform = new TimestampNow<>();
  transform.time = mock(Time.class);
  when(transform.time.milliseconds()).thenReturn(expectedTimestamp);
  final SinkRecord actual = transform.apply(input);
  assertEquals(expectedTimestamp, actual.timestamp(), "Timestamp should match.");
  verify(transform.time, times(1)).milliseconds();
}
 
Example 4
Source File: SetNullTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void test() {
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      Schema.STRING_SCHEMA,
      "key",
      null,
      "",
      1234123L,
      12341312L,
      TimestampType.NO_TIMESTAMP_TYPE
  );
  SetNull<SinkRecord> transform = new SetNull.Key<>();
  final SinkRecord actual = transform.apply(input);
  assertNull(actual.key(), "key should be null.");
  assertNull(actual.keySchema(), "keySchema should be null.");
}
 
Example 5
Source File: KafkaSourceTaskTest.java    From MirrorTool-for-Kafka-Connect with Apache License 2.0
private ConsumerRecords<byte[], byte[]> createTestRecordsWithHeaders() {
  RecordHeader header = new RecordHeader("testHeader", new byte[0]);
  RecordHeaders headers = new RecordHeaders();
  headers.add(header);
  TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;

  byte testByte = 0;
  byte[] testKey = { testByte };
  byte[] testValue = { testByte };

  ConnectHeaders destinationHeaders = new ConnectHeaders();
  destinationHeaders.add(header.key(), header.value(), Schema.OPTIONAL_BYTES_SCHEMA);
  ConsumerRecord<byte[], byte[]> testConsumerRecord = new ConsumerRecord<byte[], byte[]>(FIRST_TOPIC, FIRST_PARTITION,
      FIRST_OFFSET, System.currentTimeMillis(), timestampType, 0L, 0, 0, testKey, testValue, headers);

  TopicPartition topicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
  List<ConsumerRecord<byte[], byte[]>> consumerRecords = new ArrayList<>();
  consumerRecords.add(testConsumerRecord);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> consumerRecordMap = new HashMap<>(1);
  consumerRecordMap.put(topicPartition, consumerRecords);
  ConsumerRecords<byte[], byte[]> testRecords = new ConsumerRecords<>(consumerRecordMap);
  return testRecords;
}
 
Example 6
Source File: BaseRecordWeigherTest.java    From kafka-workers with Apache License 2.0
private WorkerRecord<String, String> workerRecordWithStrings(int keyLength, int valueLength) {
    String key = StringUtils.repeat(SOME_CHAR, keyLength);
    String value = StringUtils.repeat(SOME_CHAR, valueLength);
    ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>(EMPTY_TOPIC, SOME_PARTITION, SOME_OFFSET,
            ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, ConsumerRecord.NULL_CHECKSUM,
            key.getBytes(UTF_8).length, value.getBytes(UTF_8).length,
            key, value);
    return new WorkerRecord<>(consumerRecord, SOME_SUBPARTITION);
}
 
Example 7
Source File: DBusConsumerRecord.java    From DBus with Apache License 2.0
public DBusConsumerRecord() {
    this.topic = "";
    this.partition = 0;
    this.offset = 0;
    this.timestamp = 0;
    this.timestampType = TimestampType.NO_TIMESTAMP_TYPE;
    this.checksum = 0;
    this.serializedKeySize = 0;
    this.serializedValueSize = 0;
    this.key = null;
    this.value = null;
}
 
Example 8
Source File: BaseRecordWeigherTest.java    From kafka-workers with Apache License 2.0
private WorkerRecord<byte[], byte[]> emptyWorkerRecordWithHeaders(String[] headers) {
    RecordHeaders recordHeaders = new RecordHeaders();
    for (String headerStr: headers) {
        String[] split = headerStr.split(":");
        recordHeaders.add(new RecordHeader(split[0], split[1].getBytes(ISO_8859_1)));
    }
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>(EMPTY_TOPIC, SOME_PARTITION, SOME_OFFSET,
            ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, (long) ConsumerRecord.NULL_CHECKSUM,
            0, 0,
            new byte[0], new byte[0],
            recordHeaders);

    return new WorkerRecord<>(consumerRecord, SOME_SUBPARTITION);
}
 
Example 9
Source File: PlainPayloadFormatterTest.java    From kafka-connect-lambda with Apache License 2.0
private SinkRecord createSinkRecord(Schema keySchema, Object key, Schema valueSchema, Object value) {
  return new SinkRecord(
      TEST_TOPIC,
      TEST_PARTITION,
      keySchema,
      key,
      valueSchema,
      value,
      TEST_OFFSET,
      TEST_TIMESTAMP,
      TimestampType.NO_TIMESTAMP_TYPE
  );
}
 
Example 10
Source File: BaseRecordWeigherTest.java    From kafka-workers with Apache License 2.0
private WorkerRecord<byte[], byte[]> workerRecordWithBytes(int keyLength, int valueLength) {
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>(EMPTY_TOPIC, SOME_PARTITION, SOME_OFFSET,
            ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, ConsumerRecord.NULL_CHECKSUM,
            keyLength, valueLength,
            new byte[keyLength], new byte[valueLength]);
    return new WorkerRecord<>(consumerRecord, SOME_SUBPARTITION);
}
 
Example 11
Source File: RecordTest.java    From kafka-backup with Apache License 2.0
/**
 * This is not used during normal operations, but we need to verify that it works
 * correctly, because we use these functions in our end-to-end tests!
 */
@Test
public void roundtripSinkRecordTest() {

    // given
    Record a = new Record(TOPIC, PARTITION, KEY_BYTES, VALUE_BYTES, OFFSET, TIMESTAMP, TIMESTAMP_TYPE, HEADERS);
    Record b = new Record(TOPIC, PARTITION, KEY_BYTES, VALUE_BYTES, 3, null, TimestampType.NO_TIMESTAMP_TYPE, HEADERS);
    Record c = new Record(TOPIC, PARTITION, KEY_BYTES, VALUE_BYTES, 0);
    Record d = new Record(TOPIC, PARTITION, null, null, 1);
    Record e = new Record(TOPIC, PARTITION, new byte[0], new byte[0], 2);
    Record f = new Record(TOPIC, PARTITION, KEY_BYTES, VALUE_BYTES, OFFSET);

    // transform
    SinkRecord srA = a.toSinkRecord();
    SinkRecord srB = b.toSinkRecord();
    SinkRecord srC = c.toSinkRecord();
    SinkRecord srD = d.toSinkRecord();
    SinkRecord srE = e.toSinkRecord();
    SinkRecord srF = f.toSinkRecord();

    // expect
    assertEquals(a, Record.fromSinkRecord(srA));
    assertEquals(b, Record.fromSinkRecord(srB));
    assertEquals(c, Record.fromSinkRecord(srC));
    assertEquals(d, Record.fromSinkRecord(srD));
    assertEquals(e, Record.fromSinkRecord(srE));
    assertEquals(f, Record.fromSinkRecord(srF));
}
 
Example 12
Source File: GcsSinkTaskTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createNullRecord(final String topic,
                                    final int partition,
                                    final int offset) {
    return new SinkRecord(
        topic,
        partition,
        Schema.BYTES_SCHEMA,
        null,
        Schema.BYTES_SCHEMA,
        null,
        offset,
        null,
        TimestampType.NO_TIMESTAMP_TYPE);
}
 
Example 13
Source File: JsonPayloadFormatterTest.java    From kafka-connect-lambda with Apache License 2.0
private SinkRecord createSinkRecord(Schema keySchema, Object key, Schema valueSchema, Object value) {
  return new SinkRecord(
      TEST_TOPIC,
      TEST_PARTITION,
      keySchema,
      key,
      valueSchema,
      value,
      TEST_OFFSET,
      TEST_TIMESTAMP,
      TimestampType.NO_TIMESTAMP_TYPE
  );
}
 
Example 14
Source File: JsonPayloadFormatterTest.java    From kafka-connect-lambda with Apache License 2.0
@Test
public void testTimestampTypesSinkRecord() throws IOException {
  TimestampType[] timestampTypes = {
      TimestampType.LOG_APPEND_TIME,
      TimestampType.CREATE_TIME,
      TimestampType.NO_TIMESTAMP_TYPE
  };

  for (TimestampType t : timestampTypes) {
    final SinkRecord record = new SinkRecord(
        TEST_TOPIC,
        TEST_PARTITION,
        null,
        null,
        null,
        null,
        TEST_OFFSET,
        TEST_TIMESTAMP,
        t
    );
    final String result = formatter.format(record);
    debugShow(record, result);

    Payload payload = new Payload<>();
    payload = mapper.readValue(result, payload.getClass());
    assertEquals(t.toString(), payload.getTimestampTypeName());
  }
}
 
Example 15
Source File: Record.java    From kafka-backup with Apache License 2.0
public Record(String topic, int partition, byte[] key, byte[] value, long kafkaOffset) {
    this(topic, partition, key, value, kafkaOffset, null, TimestampType.NO_TIMESTAMP_TYPE);
}
 
Example 16
Source File: MockKafka.java    From jackdaw with BSD 3-Clause "New" or "Revised" License
ConsumerRecord<K, V> defaultRecordMapping(ProducerRecord<K, V> record, RecordMetadata metadata) {
  final int partition = record.partition() != null ? record.partition() : metadata.partition();
  // Checksum, serializedKeySize and serializedValueSize are unknown here, so they are set to -1.
  return new ConsumerRecord<>(record.topic(), partition, metadata.offset(), metadata.timestamp(),
          TimestampType.NO_TIMESTAMP_TYPE, -1L, -1, -1,
          record.key(), record.value(), record.headers());
}
 
Example 17
Source File: RecordSerde.java    From kafka-backup with Apache License 2.0
public static void write(OutputStream outputStream, Record record) throws IOException {
    DataOutputStream dataStream = new DataOutputStream(outputStream);
    dataStream.writeLong(record.kafkaOffset());
    // There is a special case where the timestamp type equals `CREATE_TIME` but the timestamp itself is `null`.
    // This should not happen normally, and I see it as a bug in the pykafka client implementation.
    // But as Kafka accepts that value, so should Kafka Backup. Thus this dirty workaround: we write the
    // timestamp type `-2` if the type is CREATE_TIME but the timestamp itself is null. Otherwise we would have
    // needed to change the byte format, and for now I think this is the better solution.
    if (record.timestampType() == TimestampType.CREATE_TIME && record.timestamp() == null) {
        dataStream.writeInt(-2);
    } else {
        dataStream.writeInt(record.timestampType().id);
        if (record.timestampType() != TimestampType.NO_TIMESTAMP_TYPE) {
            dataStream.writeLong(record.timestamp());
        }
    }
    if (record.key() != null) {
        dataStream.writeInt(record.key().length);
        dataStream.write(record.key());
    } else {
        dataStream.writeInt(-1);
    }
    if (record.value() != null) {
        dataStream.writeInt(record.value().length);
        dataStream.write(record.value());
    } else {
        dataStream.writeInt(-1);
    }
    Header[] headers = record.headers().toArray();
    dataStream.writeInt(headers.length);
    for (Header header : record.headers()) {
        byte[] headerKeyBytes = header.key().getBytes(StandardCharsets.UTF_8);
        dataStream.writeInt(headerKeyBytes.length);
        dataStream.write(headerKeyBytes);
        if (header.value() != null) {
            dataStream.writeInt(header.value().length);
            dataStream.write(header.value());
        } else {
            dataStream.writeInt(-1);
        }
    }
}
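
To make the byte layout above concrete, here is a hypothetical read counterpart reconstructed purely from what write emits: offset, timestamp-type id, optional timestamp, length-prefixed key and value with -1 marking null, header count, then length-prefixed header keys and values. The method signature, the readLengthPrefixed helper, and the Record constructor call are assumptions for illustration; the project's actual RecordSerde.read may differ.

// Requires: java.io.*, java.nio.charset.StandardCharsets,
// org.apache.kafka.common.record.TimestampType,
// org.apache.kafka.common.header.internals.RecordHeader and RecordHeaders.
public static Record read(String topic, int partition, InputStream inputStream) throws IOException {
    DataInputStream dataStream = new DataInputStream(inputStream);
    long offset = dataStream.readLong();
    int timestampTypeId = dataStream.readInt();
    Long timestamp;
    TimestampType timestampType;
    if (timestampTypeId == -2) {
        // Special case from write(): CREATE_TIME with a null timestamp.
        timestampType = TimestampType.CREATE_TIME;
        timestamp = null;
    } else {
        timestampType = TimestampType.forId(timestampTypeId);
        // write() only emits a timestamp for records that actually have one.
        timestamp = timestampType == TimestampType.NO_TIMESTAMP_TYPE ? null : dataStream.readLong();
    }
    byte[] key = readLengthPrefixed(dataStream);
    byte[] value = readLengthPrefixed(dataStream);
    int headerCount = dataStream.readInt();
    RecordHeaders headers = new RecordHeaders();
    for (int i = 0; i < headerCount; i++) {
        byte[] headerKey = readLengthPrefixed(dataStream);
        byte[] headerValue = readLengthPrefixed(dataStream);
        headers.add(new RecordHeader(new String(headerKey, StandardCharsets.UTF_8), headerValue));
    }
    return new Record(topic, partition, key, value, offset, timestamp, timestampType, headers);
}

// Hypothetical helper: reads an int length, then that many bytes; -1 means null.
private static byte[] readLengthPrefixed(DataInputStream dataStream) throws IOException {
    int length = dataStream.readInt();
    if (length == -1) {
        return null;
    }
    byte[] bytes = new byte[length];
    dataStream.readFully(bytes);
    return bytes;
}
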
 
Example 18
Source File: HeaderToFieldTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void apply() throws IOException {
  this.transformation.configure(
      ImmutableMap.of(HeaderToFieldConfig.HEADER_MAPPINGS_CONF, "applicationId:STRING")
  );

  ConnectHeaders inputHeaders = new ConnectHeaders();
  inputHeaders.addString("applicationId", "testing");

  Schema inputSchema = SchemaBuilder.struct()
      .field("firstName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("lastName", Schema.OPTIONAL_STRING_SCHEMA)
      .build();

  Struct inputStruct = new Struct(inputSchema)
      .put("firstName", "example")
      .put("lastName", "user");

  Schema expectedSchema = SchemaBuilder.struct()
      .field("firstName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("lastName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("applicationId", Schema.OPTIONAL_STRING_SCHEMA)
      .build();
  Struct expectedStruct = new Struct(expectedSchema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("applicationId", "testing");

  SinkRecord inputRecord = new SinkRecord(
      "testing",
      1,
      null,
      null,
      inputStruct.schema(),
      inputStruct,
      12345L,
      123412351L,
      TimestampType.NO_TIMESTAMP_TYPE,
      inputHeaders
  );

  SinkRecord actualRecord = this.transformation.apply(inputRecord);
  assertNotNull(actualRecord, "record should not be null.");
  assertStruct(expectedStruct, (Struct) actualRecord.value());
}
 
Example 19
Source File: RecordService.java    From snowflake-kafka-connector with Apache License 2.0
/**
 * Process the given SinkRecord;
 * only Snowflake converters are supported.
 *
 * @param record SinkRecord
 * @return a record string, ready for output
 */
public String processRecord(SinkRecord record)
{
  if (!record.valueSchema().name().equals(SnowflakeJsonSchema.NAME))
  {
    throw SnowflakeErrors.ERROR_0009.getException();
  }
  if (!(record.value() instanceof SnowflakeRecordContent))
  {
    throw SnowflakeErrors.ERROR_0010
      .getException("Input record should be SnowflakeRecordContent object");
  }

  SnowflakeRecordContent valueContent = (SnowflakeRecordContent) record.value();

  ObjectNode meta = MAPPER.createObjectNode();
  if (metadataConfig.topicFlag)
  {
    meta.put(TOPIC, record.topic());
  }
  if (metadataConfig.offsetAndPartitionFlag)
  {
    meta.put(OFFSET, record.kafkaOffset());
    meta.put(PARTITION, record.kafkaPartition());
  }

  //ignore if no timestamp
  if (record.timestampType() != TimestampType.NO_TIMESTAMP_TYPE &&
    metadataConfig.createtimeFlag)
  {
    meta.put(record.timestampType().name, record.timestamp());
  }

  //include schema id if using avro with schema registry
  if (valueContent.getSchemaID() != SnowflakeRecordContent.NON_AVRO_SCHEMA)
  {
    meta.put(SCHEMA_ID, valueContent.getSchemaID());
  }

  putKey(record, meta);

  if (!record.headers().isEmpty())
  {
    meta.set(HEADERS, parseHeaders(record.headers()));
  }


  StringBuilder buffer = new StringBuilder();
  for (JsonNode node : valueContent.getData())
  {
    ObjectNode data = MAPPER.createObjectNode();
    data.set(CONTENT, node);
    if (metadataConfig.allFlag)
    {
      data.set(META, meta);
    }
    buffer.append(data.toString());
  }
  return buffer.toString();
}