Java Code Examples for org.apache.kafka.common.serialization.Serializer#serialize()

The following examples show how to use org.apache.kafka.common.serialization.Serializer#serialize(). The originating project and license are noted above each example.
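Before diving into the project examples, here is a minimal, self-contained round-trip sketch of the Serializer#serialize(topic, data) contract using Kafka's built-in StringSerializer and StringDeserializer. The class name, topic name, and values are illustrative only and are not taken from any of the projects below.

import java.util.Collections;

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class SerializerRoundTripSketch {
    public static void main(String[] args) {
        // serialize(topic, data) converts a value into the byte[] that a producer actually sends.
        Serializer<String> serializer = new StringSerializer();
        serializer.configure(Collections.emptyMap(), false); // isKey = false: configuring a value serializer

        byte[] bytes = serializer.serialize("demo-topic", "hello"); // topic name is illustrative

        // A matching Deserializer reverses the transformation on the consumer side.
        Deserializer<String> deserializer = new StringDeserializer();
        String roundTripped = deserializer.deserialize("demo-topic", bytes);
        System.out.println(roundTripped); // prints "hello"

        serializer.close();
        deserializer.close();
    }
}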
Example 1
Source File: KafkaAvroSerdesTest.java    From registry with Apache License 2.0
@Test
public void testToggleStoringSchemaInHeader() {
    TestRecord record = new TestRecord();
    record.setField1("Hello");
    record.setField2("World");
    String keySchemaHeaderName = KafkaAvroSerde.DEFAULT_KEY_SCHEMA_VERSION_ID;

    for (Boolean storeSchemaIdInHeader : Arrays.asList(true, false)) {
        Map<String, Object> configs = new HashMap<>();
        configs.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, storeSchemaIdInHeader.toString());
        configs.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

        KafkaAvroSerde serde = new KafkaAvroSerde(schemaRegistryClient);
        final Serializer<Object> serializer = serde.serializer();
        serializer.configure(configs, true);

        Headers headers = new RecordHeaders();
        final byte[] bytes = serializer.serialize(topic, headers, record);
        Assert.assertEquals(storeSchemaIdInHeader, headers.lastHeader(keySchemaHeaderName) != null);

        final Deserializer<Object> deserializer = serde.deserializer();
        deserializer.configure(configs, true);
        final TestRecord actual = (TestRecord) deserializer.deserialize(topic, headers, bytes);
        Assert.assertEquals(record, actual);
    }
}
 
Example 2
Source File: TestDelimitedSerializer.java    From envelope with Apache License 2.0
@Test
public void testDelimitedWithDefaultNullSerialization() {
  List<StructField> fields = Lists.newArrayList(
      DataTypes.createStructField("field1", DataTypes.StringType, true),
      DataTypes.createStructField("field2", DataTypes.IntegerType, true),
      DataTypes.createStructField("field3", DataTypes.BooleanType, true)
  );
  Row row = new RowWithSchema(DataTypes.createStructType(fields), null, 1, false);

  Map<String, String> configs = Maps.newHashMap();
  configs.put(DelimitedSerializer.FIELD_DELIMITER_CONFIG_NAME, "||");
  Serializer<Row> serializer = new DelimitedSerializer();
  serializer.configure(configs, false);

  byte[] serialized = serializer.serialize("test", row);
  serializer.close();

  assertEquals(new String(serialized), DelimitedSerializer.USE_FOR_NULL_DEFAULT_VALUE + "||1||false");
}
 
Example 3
Source File: SerializerDeserializerTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testSerde() {
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();

  String s = LiKafkaClientsTestUtils.getRandomString(100);
  assertEquals(s.length(), 100);
  byte[] stringBytes = stringSerializer.serialize("topic", s);
  assertEquals(stringBytes.length, 100);
  LargeMessageSegment segment =
      new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, stringBytes.length, ByteBuffer.wrap(stringBytes));
  // String bytes + segment header
  byte[] serializedSegment = segmentSerializer.serialize("topic", segment);
  assertEquals(serializedSegment.length, 1 + stringBytes.length + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + 4);

  LargeMessageSegment deserializedSegment = segmentDeserializer.deserialize("topic", serializedSegment);
  assertEquals(deserializedSegment.messageId, segment.messageId);
  assertEquals(deserializedSegment.messageSizeInBytes, segment.messageSizeInBytes);
  assertEquals(deserializedSegment.numberOfSegments, segment.numberOfSegments);
  assertEquals(deserializedSegment.sequenceNumber, segment.sequenceNumber);
  assertEquals(deserializedSegment.payload.limit(), 100);
  String deserializedString = stringDeserializer.deserialize("topic", deserializedSegment.payloadArray());
  assertEquals(deserializedString.length(), s.length());
}
 
Example 4
Source File: MessageAssemblerTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testTreatBadSegmentAsPayload() {
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();
  MessageAssembler messageAssembler = new MessageAssemblerImpl(100, 100, true, segmentDeserializer);
  TopicPartition tp = new TopicPartition("topic", 0);

  UUID uuid = UUID.randomUUID();
  byte[] realPayload = "message".getBytes();
  LargeMessageSegment badSegment = new LargeMessageSegment(uuid, -1, 100, -1, ByteBuffer.wrap(realPayload));
  byte[] messageWrappedBytes = segmentSerializer.serialize(tp.topic(), badSegment);
  Assert.assertTrue(messageWrappedBytes.length > realPayload.length); //wrapping has been done

  messageAssembler.assemble(tp, 0, messageWrappedBytes);

  MessageAssembler.AssembleResult assembleResult = messageAssembler.assemble(tp, 0, messageWrappedBytes);
  Assert.assertEquals(assembleResult.messageBytes(), messageWrappedBytes);
  Assert.assertEquals(assembleResult.messageStartingOffset(), 0);
  Assert.assertEquals(assembleResult.messageEndingOffset(), 0);
}
 
Example 5
Source File: TestDelimitedSerializer.java    From envelope with Apache License 2.0
@Test
public void testDelimitedWithNullSerialization() {
  List<StructField> fields = Lists.newArrayList(
      DataTypes.createStructField("field1", DataTypes.StringType, true),
      DataTypes.createStructField("field2", DataTypes.IntegerType, true),
      DataTypes.createStructField("field3", DataTypes.BooleanType, true)
  );
  Row row = new RowWithSchema(DataTypes.createStructType(fields), null, 1, false);

  Map<String, String> configs = Maps.newHashMap();
  configs.put(DelimitedSerializer.FIELD_DELIMITER_CONFIG_NAME, "||");
  configs.put(DelimitedSerializer.USE_FOR_NULL_CONFIG_NAME, "BANG");
  Serializer<Row> serializer = new DelimitedSerializer();
  serializer.configure(configs, false);

  byte[] serialized = serializer.serialize("test", row);
  serializer.close();

  assertEquals(new String(serialized), "BANG||1||false");
}
 
Example 6
Source File: TestDelimitedSerializer.java    From envelope with Apache License 2.0
@Test
public void testDelimitedSerialization() {
  List<StructField> fields = Lists.newArrayList(
      DataTypes.createStructField("field1", DataTypes.StringType, true),
      DataTypes.createStructField("field2", DataTypes.IntegerType, true),
      DataTypes.createStructField("field3", DataTypes.BooleanType, true)
  );
  Row row = new RowWithSchema(DataTypes.createStructType(fields), "hello", 1, false);
  
  Map<String, String> configs = Maps.newHashMap();
  configs.put(DelimitedSerializer.FIELD_DELIMITER_CONFIG_NAME, "||");
  Serializer<Row> serializer = new DelimitedSerializer();
  serializer.configure(configs, false);
  
  byte[] serialized = serializer.serialize("test", row);
  serializer.close();
  
  assertEquals(new String(serialized), "hello||1||false");
}
 
Example 7
Source File: TestAvroSerializer.java    From envelope with Apache License 2.0
@Test
public void testAvroSerialization() throws IOException {
  Row row = Contexts.getSparkSession().sql("SELECT " +
      "'hello' field1, " +
      "true field2, " +
      "BINARY('world') field3, " +
      "CAST(1.0 AS DOUBLE) field4, " +
      "CAST(1 AS INT) field5, " +
      "CAST(1.0 AS FLOAT) field6, " +
      "CAST(1 AS BIGINT) field7, " +
      "NULL field8, NULL field9, NULL field10, NULL field11, NULL field12, NULL field13, NULL field14"
  ).collectAsList().get(0);
  
  Map<String, String> configs = Maps.newHashMap();
  configs.put(AvroSerializer.SCHEMA_PATH_CONFIG_NAME, getClass().getResource("/kafka/serde/avro-serialization-test.avsc").getFile());
  Serializer<Row> serializer = new AvroSerializer();
  serializer.configure(configs, false);
  
  byte[] serialized = serializer.serialize("test", row);
  serializer.close();
  
  Schema schema = new Schema.Parser().parse(new File(getClass().getResource("/kafka/serde/avro-serialization-test.avsc").getFile()));
  GenericDatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
  Decoder decoder = DecoderFactory.get().binaryDecoder(serialized, null);
  GenericRecord deserialized = reader.read(null, decoder);

  assertEquals("hello", deserialized.get("field1").toString());
  assertEquals(true, deserialized.get("field2"));
  assertEquals("world", new String(((ByteBuffer) deserialized.get("field3")).array()));
  assertEquals(1.0d, deserialized.get("field4"));
  assertEquals(1, deserialized.get("field5"));
  assertEquals(1.0f, deserialized.get("field6"));
  assertEquals(1L, deserialized.get("field7"));
  for (int i = 8; i <= 14; i++) {
    assertNull(deserialized.get("field" + i));
  }
}
 
Example 8
Source File: MessageSplitterTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testSplit() {
  TopicPartition tp = new TopicPartition("topic", 0);
  UUID id = LiKafkaClientsUtils.randomUUID();
  String message = LiKafkaClientsTestUtils.getRandomString(1000);
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();
  MessageSplitter splitter = new MessageSplitterImpl(200, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  byte[] serializedMessage = stringSerializer.serialize("topic", message);
  List<ProducerRecord<byte[], byte[]>> records = splitter.split("topic", id, serializedMessage);
  assertEquals(records.size(), 5, "Should have 5 segments.");
  MessageAssembler assembler = new MessageAssemblerImpl(10000, 10000, true, segmentDeserializer);
  String assembledMessage = null;
  UUID uuid = null;
  for (int i = 0; i < records.size(); i++) {
    ProducerRecord<byte[], byte[]> record = records.get(i);
    LargeMessageSegment segment = segmentDeserializer.deserialize("topic", record.value());
    if (uuid == null) {
      uuid = segment.messageId;
    } else {
      assertEquals(segment.messageId, uuid, "messageId should match.");
    }
    assertEquals(segment.numberOfSegments, 5, "segment number should be 5");
    assertEquals(segment.messageSizeInBytes, serializedMessage.length, "message size should be the same");
    assertEquals(segment.sequenceNumber, i, "SequenceNumber should match");

    assembledMessage = stringDeserializer.deserialize(null, assembler.assemble(tp, i, record.value()).messageBytes());
  }
  assertEquals(assembledMessage, message, "messages should match.");
}
 
Example 9
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testFilter() throws Exception {
  // Create consumer record processor
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  String message0 = "message0";
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", message0));

  // Let consumer record 1 be a large message.
  byte[] message1Bytes =
      segmentSerializer.serialize("topic",
                                  LiKafkaClientsTestUtils.createLargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, 20, 10));
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1Bytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  ConsumerRecords<String, String> filteredRecords = consumerRecordsProcessor.process(records).consumerRecords();
  ConsumerRecord<String, String> consumerRecord = filteredRecords.iterator().next();
  assertEquals(filteredRecords.count(), 1, "Only one record should be there after filtering.");
  assertEquals(consumerRecord0.topic(), consumerRecord.topic(), "Topic should match");
  assertEquals(consumerRecord0.partition(), consumerRecord.partition(), "partition should match");
  assertTrue(Arrays.equals(consumerRecord0.key(), consumerRecord.key().getBytes()), "key should match");
  assertEquals(consumerRecord0.offset(), consumerRecord.offset(), "Offset should match");
  assertEquals(consumerRecord.value(), "message0", "\"message0\" should be the value");
}
 
Example 10
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Example 11
Source File: KafkaAvroSerdesTest.java    From registry with Apache License 2.0
private void testSchemaHeaderNames(String customKeySchemaHeaderName,
                                   String customValueSchemaHeaderName) {
    TestRecord record = new TestRecord();
    record.setField1("Hello");
    record.setField2("World");

    Map<String, Object> configs = new HashMap<>();
    configs.put(KafkaAvroSerde.KEY_SCHEMA_VERSION_ID_HEADER_NAME, customKeySchemaHeaderName);
    configs.put(KafkaAvroSerde.VALUE_SCHEMA_VERSION_ID_HEADER_NAME, customValueSchemaHeaderName);
    configs.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, "true");
    configs.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    AvroSerDesHandler handler = new DefaultAvroSerDesHandler();
    handler.handlePayloadSerialization(outputStream, record);

    for (Boolean isKey : Arrays.asList(true, false)) {
        KafkaAvroSerde serde = new KafkaAvroSerde(schemaRegistryClient);
        final Serializer<Object> serializer = serde.serializer();
        serializer.configure(configs, isKey);

        Headers headers = new RecordHeaders();
        final byte[] bytes = serializer.serialize(topic, headers, record);
        Assert.assertArrayEquals(outputStream.toByteArray(), bytes);
        Assert.assertEquals(isKey, headers.lastHeader(customKeySchemaHeaderName) != null);
        Assert.assertEquals(!isKey, headers.lastHeader(customValueSchemaHeaderName) != null);

        final Deserializer<Object> deserializer = serde.deserializer();
        deserializer.configure(configs, isKey);
        final TestRecord actual = (TestRecord) deserializer.deserialize(topic, headers, bytes);
        Assert.assertEquals(record, actual);
    }
}
 
Example 12
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testEviction() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create a message splitter for large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  consumerRecordsProcessor.process(getConsumerRecords()).consumerRecords();
  // The offset tracker now has 2, 4, 5 in it.
  TopicPartition tp = new TopicPartition("topic", 0);

  UUID largeMessageId = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage =
      splitter.split("topic", largeMessageId, largeMessage1Bytes);

  // Test evict
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<ConsumerRecord<byte[], byte[]>>();
  // Let consumer record 6 be a large message segment.
  ConsumerRecord<byte[], byte[]> consumerRecord6 =
      new ConsumerRecord<>("topic", 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(0).value());
  // Let consumer record 7 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord7 =
      new ConsumerRecord<>("topic", 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", "message7"));
  // Let consumer record 8 complete consumer record 6.
  ConsumerRecord<byte[], byte[]> consumerRecord8 =
      new ConsumerRecord<>("topic", 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(1).value());

  recordList.add(consumerRecord6);
  recordList.add(consumerRecord7);
  recordList.add(consumerRecord8);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);
  consumerRecordsProcessor.process(records).consumerRecords();
  // Now the offset tracker should have 4, 5, 6, 8 inside it.
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 7L).longValue(), 6, "safe offset should be 6");

  try {
    consumerRecordsProcessor.safeOffset(tp, 2L);
    fail("Should throw exception because offset for message 2 should have been evicted.");
  } catch (OffsetNotTrackedException onte) {
    assertTrue(onte.getMessage().startsWith("Offset 2 for partition"));
  }
}
 
Example 13
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private byte[] wrapMessageBytes(Serializer<LargeMessageSegment> segmentSerializer, byte[] messageBytes) {
  return segmentSerializer.serialize("topic",
                                     new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 1, messageBytes.length,
                                                             ByteBuffer.wrap(messageBytes)));
}
 
Example 14
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private ConsumerRecords<byte[], byte[]> getConsumerRecords() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create two large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  UUID largeMessageId1 = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage1 =
      splitter.split("topic", largeMessageId1, largeMessage1Bytes);

  UUID largeMessageId2 = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage2Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage2 =
      splitter.split("topic", largeMessageId2, largeMessage2Bytes);

  // Let consumer record 0 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message0"));
  // Let consumer record 1 be a large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(0).value());
  // Let consumer record 2 be a normal message
  ConsumerRecord<byte[], byte[]> consumerRecord2 =
      new ConsumerRecord<>("topic", 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), stringSerializer.serialize("topic", "message1"));
  // Let record 3 be a new large message segment
  ConsumerRecord<byte[], byte[]> consumerRecord3 =
      new ConsumerRecord<>("topic", 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(0).value());
  // Let record 4 complete record 3.
  ConsumerRecord<byte[], byte[]> consumerRecord4 =
      new ConsumerRecord<>("topic", 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage2.get(1).value());
  // Let record 5 complete record 1.
  ConsumerRecord<byte[], byte[]> consumerRecord5 =
      new ConsumerRecord<>("topic", 0, 5, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage1.get(1).value());

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  recordList.add(consumerRecord2);
  recordList.add(consumerRecord3);
  recordList.add(consumerRecord4);
  recordList.add(consumerRecord5);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  return new ConsumerRecords<>(recordsMap);
}
 
Example 15
Source File: RegistrySerdeTest.java    From apicurio-registry with Apache License 2.0
@SuppressWarnings("unchecked")
@RegistryServiceTest
public void testConfiguration(Supplier<RegistryService> supplier) throws Exception {
    Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}");

    String artifactId = generateArtifactId();

    CompletionStage<ArtifactMetaData> csa = supplier.get().createArtifact(
        ArtifactType.AVRO,
        artifactId + "-myrecord3",
        null, 
        new ByteArrayInputStream(schema.toString().getBytes(StandardCharsets.UTF_8))
    );
    ArtifactMetaData amd = ConcurrentUtil.result(csa);
    // reset any cache
    supplier.get().reset();
    // wait for global id store to populate (in case of Kafka / Streams)
    ArtifactMetaData amdById = retry(() -> supplier.get().getArtifactMetaDataByGlobalId(amd.getGlobalId()));
    Assertions.assertNotNull(amdById);

    GenericData.Record record = new GenericData.Record(schema);
    record.put("bar", "somebar");

    Map<String, Object> config = new HashMap<>();
    config.put(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, "http://localhost:8081/api");
    config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, new TopicRecordIdStrategy());
    config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, new FindLatestIdStrategy<>());
    config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, new DefaultAvroDatumProvider<>());
    Serializer<GenericData.Record> serializer = (Serializer<GenericData.Record>) getClass().getClassLoader()
                                                                                           .loadClass(AvroKafkaSerializer.class.getName())
                                                                                           .newInstance();
    serializer.configure(config, true);
    byte[] bytes = serializer.serialize(artifactId, record);

    Deserializer<GenericData.Record> deserializer = (Deserializer<GenericData.Record>) getClass().getClassLoader()
                                                                                                 .loadClass(AvroKafkaDeserializer.class.getName())
                                                                                                 .newInstance();
    deserializer.configure(config, true);

    record = deserializer.deserialize(artifactId, bytes);
    Assertions.assertEquals("somebar", record.get("bar").toString());

    config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, TopicRecordIdStrategy.class);
    config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class);
    config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, DefaultAvroDatumProvider.class);
    serializer.configure(config, true);
    bytes = serializer.serialize(artifactId, record);
    deserializer.configure(config, true);
    record = deserializer.deserialize(artifactId, bytes);
    Assertions.assertEquals("somebar", record.get("bar").toString());

    config.put(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, TopicRecordIdStrategy.class.getName());
    config.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName());
    config.put(AvroDatumProvider.REGISTRY_AVRO_DATUM_PROVIDER_CONFIG_PARAM, DefaultAvroDatumProvider.class.getName());
    serializer.configure(config, true);
    bytes = serializer.serialize(artifactId, record);
    deserializer.configure(config, true);
    record = deserializer.deserialize(artifactId, bytes);
    Assertions.assertEquals("somebar", record.get("bar").toString());

    serializer.close();
    deserializer.close();
}
 
Example 16
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testSafeOffsetWithoutLargeMessage() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Let consumer record 1 be a normal message.
  byte[] message1Bytes = stringSerializer.serialize("topic", "message1");
  byte[] message1WrappedBytes = wrapMessageBytes(segmentSerializer, message1Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord1 =
      new ConsumerRecord<>("topic", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message1WrappedBytes);

  // Construct the consumer records.
  TopicPartition tp = new TopicPartition("topic", 0);
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  recordList.add(consumerRecord1);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap =
      new HashMap<>();
  recordsMap.put(tp, recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();
  Map<TopicPartition, OffsetAndMetadata> safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit();
  assertEquals(safeOffsets.size(), 1, "Safe offsets should contain one entry");
  assertEquals(safeOffsets.get(tp).offset(), 2, "Safe offset of topic partition 0 should be 2");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 0L).longValue(), 1, "safe offset should be 1");
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 1L).longValue(), 2, "safe offset should be 2");

  Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();
  offsetMap.put(tp, new OffsetAndMetadata(1L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 1L, "Safe offset of topic partition 0 should be 1");

  offsetMap.put(tp, new OffsetAndMetadata(2L));
  safeOffsets = consumerRecordsProcessor.safeOffsetsToCommit(offsetMap, false);
  assertEquals(safeOffsets.get(tp).offset(), 2L, "Safe offset of topic partition 0 should be 2");
}
 
Example 17
Source File: MessageAssemblerTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
private byte[] wrapMessageBytes(Serializer<LargeMessageSegment> segmentSerializer, byte[] messageBytes) {
  return segmentSerializer.serialize("topic",
      new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 1, messageBytes.length, ByteBuffer.wrap(messageBytes)));
}
 
Example 18
Source File: LiKafkaProducerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
/**
 * This test produces data into a temporary topic on a particular broker with a value that cannot be serialized,
 * and verifies that producer.send() throws an exception if and only if SKIP_RECORD_ON_SKIPPABLE_EXCEPTION_CONFIG is false.
 */
@Test
public void testSerializationException() throws Exception {

  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<String> errorThrowingSerializer = new Serializer<String>() {
    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {

    }

    @Override
    public byte[] serialize(String topic, String value) {
      if (value.equals("ErrorBytes")) {
        throw new SkippableException();
      }
      return stringSerializer.serialize(topic, value);
    }

    @Override
    public void close() {

    }
  };

  Properties props = getProducerProperties(null);
  props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
  String tempTopic = "testTopic" + new Random().nextInt(1000000);
  createTopic(tempTopic);
  try (LiKafkaProducer<String, String> producer = new LiKafkaProducerImpl<>(props, stringSerializer, errorThrowingSerializer, null, null)) {
    producer.send(new ProducerRecord<>(tempTopic, "ErrorBytes"));
    producer.send(new ProducerRecord<>(tempTopic, "value"));
  }

  Properties consumerProps = new Properties();
  consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  int messageCount = 0;
  try (LiKafkaConsumer<String, String> consumer = createConsumer(consumerProps)) {
    consumer.subscribe(Collections.singleton(tempTopic));
    long startMs = System.currentTimeMillis();
    while (messageCount < 1 && System.currentTimeMillis() < startMs + 30000) {
      ConsumerRecords<String, String> records = consumer.poll(100);
      for (ConsumerRecord<String, String> record : records) {
        assertEquals("value", record.value());
        messageCount++;
      }
    }
  }
  assertEquals(1, messageCount);
}
 
Example 19
Source File: PregelComputation.java    From kafka-graphs with Apache License 2.0
private static <K> int vertexToPartition(K vertex, Serializer<K> serializer, int numPartitions) {
    // TODO make configurable, currently this is tied to DefaultStreamPartitioner
    byte[] keyBytes = serializer.serialize(null, vertex);
    int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    return partition;
}