Java Code Examples for org.apache.kafka.common.record.TimestampType#CREATE_TIME

The following examples show how to use org.apache.kafka.common.record.TimestampType#CREATE_TIME. Each example is taken from an open-source project; the source file, project, and license are listed above each snippet.
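As a quick orientation before the examples, here is a minimal sketch (hypothetical topic, key, and value) of the constructor pattern most of the test fixtures below rely on: a ConsumerRecord whose timestamp is flagged as CREATE_TIME, meaning the timestamp was assigned by the producer when the record was created rather than by the broker on append.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;

// Minimal sketch with hypothetical names: build a ConsumerRecord whose timestamp
// is to be interpreted as CREATE_TIME (i.e. set by the producer at creation time).
static ConsumerRecord<byte[], byte[]> createTimeRecord() {
  byte[] key = "example-key".getBytes(StandardCharsets.UTF_8);
  byte[] value = "example-value".getBytes(StandardCharsets.UTF_8);
  return new ConsumerRecord<>(
      "example-topic",              // topic (hypothetical)
      0,                            // partition
      42L,                          // offset
      System.currentTimeMillis(),   // timestamp supplied by the producer
      TimestampType.CREATE_TIME,    // how that timestamp should be interpreted
      ConsumerRecord.NULL_CHECKSUM, // checksum is not relevant here
      key.length,                   // serialized key size
      value.length,                 // serialized value size
      key,
      value);
}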
Example 1
Source File: SinkRecordHelper.java    From connect-utils with Apache License 2.0
public static SinkRecord delete(String topic, SchemaAndValue key) {
  Preconditions.checkNotNull(topic, "topic cannot be null");
  if (null == key) {
    throw new DataException("key cannot be null.");
  }
  if (null == key.value()) {
    throw new DataException("key value cannot be null.");
  }

  return new SinkRecord(
      topic,
      PARTITION,
      key.schema(),
      key.value(),
      null,
      null,
      OFFSET,
      TIMESTAMP,
      TimestampType.CREATE_TIME
  );
}
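
A minimal usage sketch for the helper above (hypothetical topic and key; the PARTITION, OFFSET, and TIMESTAMP constants are assumed to be defined in SinkRecordHelper): a delete record keeps the key but carries a null value and value schema, and is stamped with TimestampType.CREATE_TIME.

import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.sink.SinkRecord;

// Hypothetical caller: build a tombstone (delete) record for a string key.
static void tombstoneExample() {
  SchemaAndValue key = new SchemaAndValue(Schema.STRING_SCHEMA, "user-42");
  SinkRecord tombstone = SinkRecordHelper.delete("example-topic", key);

  // The tombstone has no value, and its timestamp type is CREATE_TIME.
  assert tombstone.value() == null;
  assert tombstone.timestampType() == TimestampType.CREATE_TIME;
}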
 
Example 2
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeBrokenCompoundKeysAsMessageKey() {
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "record-key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("_synapse_msg_partitionKey", "1234".getBytes(UTF_8)),
                    new RecordHeader("_synapse_msg_compactionKey", "key-1234".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    assertThat(decodedMessage.getKey().isCompoundKey(), is(false));
    assertThat(decodedMessage.getKey().compactionKey(), is("record-key"));
}
 
Example 3
Source File: MirusSourceTaskTest.java    From mirus with BSD 3-Clause "New" or "Revised" License
private ConsumerRecord<byte[], byte[]> newConsumerRecord(
    String topic, int partition, int offset, Long timestamp, Headers headers) {
  final Long checksum = 1234L;
  final byte[] key = "test-key".getBytes(StandardCharsets.UTF_8);
  final int serializedKeySize = key.length;
  final byte[] value = "test-value".getBytes(StandardCharsets.UTF_8);
  final int serializedValueSize = value.length;
  return new ConsumerRecord<>(
      topic,
      partition,
      offset,
      timestamp,
      TimestampType.CREATE_TIME,
      checksum,
      serializedKeySize,
      serializedValueSize,
      key,
      value,
      headers);
}
 
Example 4
Source File: GcsSinkTaskTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createRecord(final String topic,
                                final int partition,
                                final String key,
                                final String value,
                                final int offset,
                                final long timestamp) {
    return new SinkRecord(
        topic,
        partition,
        Schema.BYTES_SCHEMA,
        key.getBytes(StandardCharsets.UTF_8),
        Schema.BYTES_SCHEMA,
        value.getBytes(StandardCharsets.UTF_8),
        offset,
        timestamp,
        TimestampType.CREATE_TIME);
}
 
Example 5
Source File: KafkaRoadConsumerTest.java    From data-highway with Apache License 2.0
@Test
public void poll() throws Exception {
  Payload<byte[]> payload = new Payload<>((byte) 0, 1, "{}".getBytes(UTF_8));
  ConsumerRecord<Void, Payload<byte[]>> consumerRecord = new ConsumerRecord<>(topicName, 0, 1L, 2L,
      TimestampType.CREATE_TIME, ConsumerRecord.NULL_CHECKSUM, ConsumerRecord.NULL_SIZE, ConsumerRecord.NULL_SIZE,
      null, payload);
  Map<TopicPartition, List<ConsumerRecord<Void, Payload<byte[]>>>> recordsMaps = singletonMap(topicPartition,
      singletonList(consumerRecord));
  ConsumerRecords<Void, Payload<byte[]>> records = new ConsumerRecords<>(recordsMaps);
  when(consumer.poll(100)).thenReturn(records);
  when(payloadDecoder.decode(any(), any())).thenReturn(mapper.createObjectNode());

  Record record = new Record(0, 1L, 2L, new Payload<JsonNode>((byte) 0, 1, mapper.createObjectNode()));

  underTest.init(1L, rebalanceListener);
  Iterable<Record> result = underTest.poll();

  assertThat(Iterables.size(result), is(1));
  assertThat(Iterables.get(result, 0), is(record));
}
 
Example 6
Source File: KafkaConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put("kafka-origin", _srcConnString.toString());
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put("kafka-origin-partition", partitionStr);
  String offsetStr = String.valueOf(fromKafka.offset());
  metadata.put("kafka-origin-offset", offsetStr);

  long eventsSourceTimestamp = readTime.toEpochMilli();
  if (fromKafka.timestampType() == TimestampType.CREATE_TIME) {
    // If the Kafka record carries the create time, we store the event creation time as the event timestamp.
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
  } else if (fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME) {
    // If the Kafka record carries the log append time, we use that as the event source timestamp,
    // which will be used to calculate the SLA.
    metadata.put(BrooklinEnvelopeMetadataConstants.SOURCE_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(readTime.toEpochMilli()));
    eventsSourceTimestamp = fromKafka.timestamp();
  }

  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setPartition(partition); // assume source partition count is same as dest
  builder.setSourceCheckpoint(partitionStr + "-" + offsetStr);

  return builder.build();
}
 
Example 7
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  LiKafkaClientsTestUtils.createLargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records).consumerRecords();
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Example 8
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeMessageHeaders() {
    // given
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("foo", "foovalue".getBytes(UTF_8)),
                    new RecordHeader("bar", "barvalue".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    final Map<String, String> expectedHeaders = ImmutableMap.of(
            "foo", "foovalue",
            "bar", "barvalue"
    );
    assertThat(decodedMessage.getHeader().getAll(), is(expectedHeaders));
}
 
Example 9
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Example 10
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testNullValue() {
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  ConsumerRecord<byte[], byte[]> consumerRecord =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), null);
  ConsumerRecords<byte[], byte[]> consumerRecords =
      new ConsumerRecords<>(Collections.singletonMap(new TopicPartition("topic", 0), Collections.singletonList(consumerRecord)));
  ConsumerRecords<String, String> processedRecords = consumerRecordsProcessor.process(consumerRecords).consumerRecords();
  assertNull(processedRecords.iterator().next().value());
}
 
Example 11
Source File: ConsumerRecordsIteratorWrapper.java    From apm-agent-java with Apache License 2.0
@Override
public ConsumerRecord next() {
    endCurrentTransaction();
    ConsumerRecord record = delegate.next();
    try {
        String topic = record.topic();
        if (!WildcardMatcher.isAnyMatch(messagingConfiguration.getIgnoreMessageQueues(), topic)) {
            Transaction transaction = tracer.startChildTransaction(record, KafkaRecordHeaderAccessor.instance(), ConsumerRecordsIteratorWrapper.class.getClassLoader());
            if (transaction != null) {
                transaction.withType("messaging").withName("Kafka record from " + topic).activate();
                transaction.setFrameworkName(FRAMEWORK_NAME);

                Message message = transaction.getContext().getMessage();
                message.withQueue(topic);
                if (record.timestampType() == TimestampType.CREATE_TIME) {
                    message.withAge(System.currentTimeMillis() - record.timestamp());
                }

                if (transaction.isSampled() && coreConfiguration.isCaptureHeaders()) {
                    for (Header header : record.headers()) {
                        String key = header.key();
                        if (!TraceContext.TRACE_PARENT_BINARY_HEADER_NAME.equals(key) &&
                            WildcardMatcher.anyMatch(coreConfiguration.getSanitizeFieldNames(), key) == null) {
                            message.addHeader(key, header.value());
                        }
                    }
                }

                if (transaction.isSampled() && coreConfiguration.getCaptureBody() != CoreConfiguration.EventType.OFF) {
                    message.appendToBody("key=").appendToBody(String.valueOf(record.key())).appendToBody("; ")
                        .appendToBody("value=").appendToBody(String.valueOf(record.value()));
                }
            }
        }
    } catch (Exception e) {
        logger.error("Error in transaction creation based on Kafka record", e);
    }
    return record;
}
 
Example 12
Source File: Records.java    From kafka-connect-solr with Apache License 2.0
static StructTestCase struct() {
  StructTestCase testCase = new StructTestCase();

  Schema schema = SchemaBuilder.struct()
      .name("Testing")
      .field("firstName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("lastName", Schema.OPTIONAL_STRING_SCHEMA)
      .field("email", Schema.OPTIONAL_STRING_SCHEMA)
      .field("age", Schema.OPTIONAL_INT32_SCHEMA)
      .build();
  testCase.struct = new Struct(schema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("email", "[email protected]")
      .put("age", 27);
  testCase.record = new SinkRecord(
      "testing",
      1,
      null,
      null,
      null,
      testCase.struct,
      2L,
      1484897702123L,
      TimestampType.CREATE_TIME
  );

  return testCase;
}
 
Example 13
Source File: JsonPayloadFormatterTest.java    From kafka-connect-lambda with Apache License 2.0
@Test
public void testTimestampTypesSinkRecord() throws IOException {
  TimestampType[] timestampTypes = {
      TimestampType.LOG_APPEND_TIME,
      TimestampType.CREATE_TIME,
      TimestampType.NO_TIMESTAMP_TYPE
  };

  for (TimestampType t : timestampTypes) {
    final SinkRecord record = new SinkRecord(
        TEST_TOPIC,
        TEST_PARTITION,
        null,
        null,
        null,
        null,
        TEST_OFFSET,
        TEST_TIMESTAMP,
        t
    );
    final String result = formatter.format(record);
    debugShow(record, result);

    Payload payload = new Payload<>();
    payload = mapper.readValue(result, payload.getClass());
    assertEquals(t.toString(), payload.getTimestampTypeName());
  }
}
 
Example 14
Source File: LambdaSinkTaskTest.java    From kafka-connect-lambda with Apache License 2.0
@Ignore("Test is ignored as a demonstration -- needs credentials")
@Test
public void testPutWhenBatchingIsNotEnabled() {

    ImmutableMap<String, String> props =
            new ImmutableMap.Builder<String, String>()
                    .put("connector.class", "com.nordstrom.kafka.connect.lambda.LambdaSinkConnector")
                    .put("tasks.max", "1")
                    .put("aws.lambda.function.arn", "arn:aws:lambda:us-west-2:123456789123:function:test-lambda")
                    .put("aws.lambda.invocation.timeout.ms", "300000")
                    .put("aws.lambda.invocation.mode", "SYNC")
                    .put("aws.lambda.batch.enabled", "false")
                    .put("key.converter", "org.apache.kafka.connect.storage.StringConverter")
                    .put("value.converter", "org.apache.kafka.connect.storage.StringConverter")
                    .put("topics", "connect-lambda-test")
                    .build();

    LambdaSinkTask task = new LambdaSinkTask();
    task.initialize(mock(SinkTaskContext.class));

    task.start(props);

    InvocationClient mockedClient = mock(InvocationClient.class);

    when(mockedClient.invoke(any()))
        .thenReturn(new InvocationResponse(200, "test log", "", Instant.now(), Instant.now()));

    Schema testSchema = SchemaBuilder.struct().name("com.nordstrom.kafka.connect.lambda.foo").field("bar", STRING_SCHEMA).build();

    SinkRecord testRecord = new SinkRecord("connect-lambda-test", 0, STRING_SCHEMA, "sometestkey", testSchema, "testing", 0, 0L, TimestampType.CREATE_TIME);
    Collection<SinkRecord> testList = new ArrayList<>();
    testList.add(testRecord);

    task.put(testList);
}
 
Example 15
Source File: KafkaConsumerUtil.java    From beast with Apache License 2.0
public ConsumerRecord<byte[], byte[]> createConsumerRecord(String orderNumber, String orderUrl, String orderDetails) {
    TestKey key = TestKey.newBuilder()
            .setOrderNumber(orderNumber)
            .setOrderUrl(orderUrl)
            .build();
    TestMessage message = TestMessage.newBuilder()
            .setOrderNumber(orderNumber)
            .setOrderUrl(orderUrl)
            .setOrderDetails(orderDetails)
            .build();
    return new ConsumerRecord<>(topic, partition, offset++, timestamp, TimestampType.CREATE_TIME, 0, 0, 0, key.toByteArray(), message.toByteArray());
}
 
Example 16
Source File: BqIntegrationTest.java    From beast with Apache License 2.0
@Ignore
@Test
public void shouldPushTestNestedRepeatedMessages() throws InvalidProtocolBufferException {
    Instant now = Instant.now();
    long second = now.getEpochSecond();
    ProtoParser protoParser = new ProtoParser(StencilClientFactory.getClient(), TestNestedRepeatedMessage.class.getName());
    TestNestedRepeatedMessage protoMessage = TestNestedRepeatedMessage.newBuilder()
            .addRepeatedMessage(ProtoUtil.generateTestMessage(now))
            .addRepeatedMessage(ProtoUtil.generateTestMessage(now))
            .build();

    TableId tableId = TableId.of("bqsinktest", "nested_messages");
    BqSink bqSink = new BqSink(authenticatedBQ(), tableId, new BQResponseParser(), gcsSinkHandler, bqRow);

    ColumnMapping columnMapping = new ColumnMapping();
    ColumnMapping nested = new ColumnMapping();
    nested.put("record_name", "messsages");
    nested.put("1", "order_number");
    nested.put("2", "order_url");
    columnMapping.put("2", nested);
    ConsumerRecordConverter customConverter = new ConsumerRecordConverter(new RowMapper(columnMapping), protoParser, clock);


    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("topic", 1, 1, second, TimestampType.CREATE_TIME,
            0, 0, 1, null, protoMessage.toByteArray());

    List<Record> records = customConverter.convert(Collections.singleton(consumerRecord));
    Status push = bqSink.push(new Records(records));
    assertTrue(push.isSuccess());
}
 
Example 17
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testSafeOffsetWithoutLargeMessage() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Let consumer record 1 be a normal message.
  byte[] message1Bytes = stringSerializer.serialize("topic", "message1");
  byte[] message1WrappedBytes = wrapMessageBytes(segmentSerializer, message1Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1WrappedBytes);

  // Construct the consumer records.
  TopicPartition tp = new TopicPartition("topic", 0);
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(tp, recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();
  Map<TopicPartition, OffsetAndMetadata> safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit();
  assertEquals(safeOffsets.size(), 1, "Safe offsets should contain one entry");
  assertEquals(safeOffsets.get(tp).offset(), 2, "Safe offset of topic partition 0 should be 2");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 0L).longValue(), 1, "safe offset should be 1");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 1L).longValue(), 2, "safe offset should be 2");

  Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
  offsetMap.put(tp, new OffsetAndMetadata(1L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 1L, "Safe offset of topic partition 0 should be 1");

  offsetMap.put(tp, new OffsetAndMetadata(2L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 2L, "Safe offset of topic partition 0 should be 2");
}
 
Example 18
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private ConsumerRecords<byte[], byte[]> getConsumerRecords() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create two large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  UUID largeMessageId1 = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage1 =
      splitter.split("topic", largeMessageId1, largeMessage1Bytes);

  UUID largeMessageId2 = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage2Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage2 =
      splitter.split("topic", largeMessageId2, largeMessage2Bytes);

  // Let consumer record 0 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message0"));
  // Let consumer record 1 be a large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(0).value());
  // Let consumer record 2 be a normal message
  ConsumerRecord<byte[], byte[]> consumerRecord2 =
      new ConsumerRecord<>("topic", 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message1"));
  // Let record 3 be a new large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord3 =
      new ConsumerRecord<>("topic", 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(0).value());
  // Let record 4 complete record 3.
  ConsumerRecord<byte[], byte[]> consumerRecord4 =
      new ConsumerRecord<>("topic", 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(1).value());
  // Let record 5 complete record 1.
  ConsumerRecord<byte[], byte[]> consumerRecord5 =
      new ConsumerRecord<>("topic", 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(1).value());

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  recordList.add(consumerRecord2);
  recordList.add(consumerRecord3);
  recordList.add(consumerRecord4);
  recordList.add(consumerRecord5);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  return new ConsumerRecords<>(recordsMap);
}
 
Example 19
Source File: GroupMetadataManager.java    From kop with Apache License 2.0
public CompletableFuture<Errors> storeGroup(GroupMetadata group,
                                            Map<String, byte[]> groupAssignment) {

    TimestampType timestampType = TimestampType.CREATE_TIME;
    long timestamp = time.milliseconds();
    byte[] key = groupMetadataKey(group.groupId());
    byte[] value = groupMetadataValue(
        group, groupAssignment, CURRENT_GROUP_VALUE_SCHEMA_VERSION);

    // construct the record
    ByteBuffer buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(
        magicValue,
        compressionType,
        Lists.newArrayList(new SimpleRecord(timestamp, key, value))
    ));
    MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(
        buffer,
        magicValue,
        compressionType,
        timestampType,
        0L
    );
    recordsBuilder.append(timestamp, key, value);
    MemoryRecords records = recordsBuilder.build();

    return getOffsetsTopicProducer(group.groupId())
        .thenComposeAsync(f -> f.newMessage()
                .keyBytes(key)
                .value(records.buffer())
                .eventTime(timestamp).sendAsync()
            , scheduler)
        .thenApplyAsync(msgId -> {
            if (!isGroupLocal(group.groupId())) {
                if (log.isDebugEnabled()) {
                    log.debug("add partition ownership for group {}",
                        group.groupId());
                }
                addPartitionOwnership(partitionFor(group.groupId()));
            }
            return Errors.NONE;
        }, scheduler)
        .exceptionally(cause -> Errors.COORDINATOR_NOT_AVAILABLE);
}
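
For reference, a standalone sketch (hypothetical key and value) of the MemoryRecordsBuilder pattern used above; the built batch carries the timestamp type it was created with, which is how readers of the batch later know whether its timestamps are CREATE_TIME or LOG_APPEND_TIME.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

// Standalone sketch: append one record to a CREATE_TIME batch and read the
// timestamp type back from the built batch.
static void buildCreateTimeBatch() {
  long timestamp = System.currentTimeMillis();
  byte[] key = "group-key".getBytes(StandardCharsets.UTF_8);
  byte[] value = "group-value".getBytes(StandardCharsets.UTF_8);

  MemoryRecordsBuilder builder = MemoryRecords.builder(
      ByteBuffer.allocate(1024),
      RecordBatch.CURRENT_MAGIC_VALUE,
      CompressionType.NONE,
      TimestampType.CREATE_TIME,
      0L);                                    // base offset
  builder.append(timestamp, key, value);
  MemoryRecords records = builder.build();

  for (MutableRecordBatch batch : records.batches()) {
    System.out.println(batch.timestampType()); // CREATE_TIME
    System.out.println(batch.maxTimestamp());  // the timestamp appended above
  }
}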
 
Example 20
Source File: BqIntegrationTest.java    From beast with Apache License 2.0
@Test
public void shouldParseAndPushMessagesToBq() throws Exception {
    TableId tableId = TableId.of("bqsinktest", "test_messages");
    BqSink bqSink = new BqSink(bigQueryMock, tableId, new BQResponseParser(), gcsSinkHandler, bqRow);
    String orderNumber = "order-1";
    String orderUrl = "order_url";
    String orderDetails = "order_details";
    Instant now = Instant.now();
    long second = now.getEpochSecond();
    int nano = now.getNano();
    ColumnMapping mapping = new ColumnMapping();
    mapping.put("1", "order_number");
    mapping.put("2", "order_url");
    mapping.put("3", "order_details");
    mapping.put("4", "created_at");
    mapping.put("5", "status");
    mapping.put("6", "discounted_value");
    mapping.put("7", "success");
    mapping.put("8", "order_price");
    mapping.put("12", "aliases");

    ColumnMapping currStateMapping = new ColumnMapping();
    currStateMapping.put("record_name", "current_state");
    currStateMapping.put("1", "key");
    currStateMapping.put("2", "value");
    mapping.put("9", currStateMapping);

    converter = new ConsumerRecordConverter(new RowMapper(mapping), new ProtoParser(StencilClientFactory.getClient(), TestMessage.class.getName()), clock);
    Timestamp createdAt = Timestamp.newBuilder().setSeconds(second).setNanos(nano).build();
    TestKey key = TestKey.newBuilder().setOrderNumber(orderNumber).setOrderUrl(orderUrl).build();
    com.gojek.beast.Status completed = com.gojek.beast.Status.COMPLETED;
    long discount = 1234;
    float price = 1234.5678f;
    TestMessage message = TestMessage.newBuilder()
            .setOrderNumber(orderNumber)
            .setOrderUrl(orderUrl)
            .setOrderDetails(orderDetails)
            .setCreatedAt(createdAt)
            .setStatus(completed)
            .setDiscount(discount)
            .setPrice(price)
            .setSuccess(true)
            .addAliases("alias1").addAliases("alias2")
            .putCurrentState("state_key_1", "state_value_1")
            .putCurrentState("state_key_2", "state_value_2")
            .build();
    String topic = "topic";
    int partition = 1, offset = 1;
    long recordTimestamp = Instant.now().toEpochMilli();
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>(topic, partition, offset, recordTimestamp, TimestampType.CREATE_TIME,
            0, 0, 1, key.toByteArray(), message.toByteArray());

    List<ConsumerRecord<byte[], byte[]>> messages = Arrays.asList(consumerRecord);
    when(successfulResponse.hasErrors()).thenReturn(false);
    when(bigQueryMock.insertAll(insertRequestCaptor.capture())).thenReturn(successfulResponse);

    List<Record> records = converter.convert(messages);
    Status status = bqSink.push(new Records(records));
    assertTrue(status.isSuccess());

    List<InsertAllRequest.RowToInsert> bqRows = insertRequestCaptor.getValue().getRows();
    assertEquals(1, bqRows.size());
    Map<String, Object> contents = bqRows.get(0).getContent();
    assertEquals("should have same number of columns as mappings, with metadata columns", mapping.size() + 5, contents.size());
    assertEquals(orderUrl, contents.get("order_url"));
    assertEquals(orderNumber, contents.get("order_number"));
    assertEquals(orderDetails, contents.get("order_details"));
    assertEquals(new DateTime(Instant.ofEpochSecond(second, nano).toEpochMilli()), contents.get("created_at"));
    assertEquals(completed.toString(), contents.get("status"));
    assertEquals(discount, contents.get("discounted_value"));
    assertEquals(price, contents.get("order_price"));
    assertEquals(Arrays.asList("alias1", "alias2"), contents.get("aliases"));
    assertTrue(Boolean.valueOf(contents.get("success").toString()));
    containsMetadata(contents, new OffsetInfo(topic, partition, offset, recordTimestamp));
    List repeatedStateMap = (List) contents.get("current_state");
    assertEquals("state_key_1", ((Map) repeatedStateMap.get(0)).get("key"));
    assertEquals("state_value_1", ((Map) repeatedStateMap.get(0)).get("value"));
    assertEquals("state_key_2", ((Map) repeatedStateMap.get(1)).get("key"));
    assertEquals("state_value_2", ((Map) repeatedStateMap.get(1)).get("value"));
}