Java Code Examples for org.apache.kafka.connect.data.Schema#BYTES_SCHEMA

The following examples show how to use org.apache.kafka.connect.data.Schema#BYTES_SCHEMA. They are taken from open-source projects; to see an example in context, follow the source-file attribution above it to the original project.
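Before the examples, a quick orientation: Schema.BYTES_SCHEMA is the predefined, non-optional schema whose type is Schema.Type.BYTES; its nullable counterpart is Schema.OPTIONAL_BYTES_SCHEMA. A minimal sketch of the pattern shared by the examples below (the payload string is illustrative):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

// Sketch: pair a byte[] payload with the built-in BYTES schema. For
// Schema.Type.BYTES, Connect accepts byte[] or ByteBuffer values.
private SchemaAndValue bytesPayload(String text) {
    return new SchemaAndValue(
        Schema.BYTES_SCHEMA,
        text.getBytes(StandardCharsets.UTF_8));
}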
Example 1
Source File: GcsSinkTaskTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createRecordStringKey(final String topic,
                                         final int partition,
                                         final String key,
                                         final String value,
                                         final int offset,
                                         final long timestamp) {
    return new SinkRecord(
        topic,
        partition,
        Schema.OPTIONAL_STRING_SCHEMA,
        key,
        Schema.BYTES_SCHEMA,
        value.getBytes(StandardCharsets.UTF_8),
        offset,
        timestamp,
        TimestampType.CREATE_TIME);
}
 
Example 2
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests that no partition is assigned when the partition scheme is "kafka_partitioner". */
@Test
public void testPollWithPartitionSchemeKafkaPartitioner() throws Exception {
  props.put(
          CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
          CloudPubSubSourceConnector.PartitionScheme.KAFKA_PARTITIONER.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
          new SourceRecord(
                  null,
                  null,
                  KAFKA_TOPIC,
                  null,
                  Schema.OPTIONAL_STRING_SCHEMA,
                  null,
                  Schema.BYTES_SCHEMA,
                  KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
  assertNull(result.get(0).kafkaPartition());
}
 
Example 3
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests that the correct partition is assigned when the partition scheme is "hash_value". */
@Test
public void testPollWithPartitionSchemeHashValue() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_VALUE.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 4
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have attributes that match both
 * {@link #KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE} and {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageTimestampAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE, KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
          new SourceRecord(
                  null,
                  null,
                  KAFKA_TOPIC,
                  0,
                  Schema.OPTIONAL_STRING_SCHEMA,
                  KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
                  Schema.BYTES_SCHEMA,
                  KAFKA_VALUE, Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE));
  assertRecordsEqual(expected, result.get(0));
}
 
Example 5
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches {@link
 * #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageKeyAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 6
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do not have an attribute that matches
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithNoMessageKeyAttribute() throws Exception {
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example 7
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Compare two SourceRecords. This is necessary because the records' values contain a byte[] and
 * the .equals on a SourceRecord does not take this into account.
 */
public void assertRecordsEqual(SourceRecord sr1, SourceRecord sr2) {
  assertEquals(sr1.key(), sr2.key());
  assertEquals(sr1.keySchema(), sr2.keySchema());
  assertEquals(sr1.valueSchema(), sr2.valueSchema());
  assertEquals(sr1.topic(), sr2.topic());

  if (sr1.valueSchema() == Schema.BYTES_SCHEMA) {
    assertArrayEquals((byte[])sr1.value(), (byte[])sr2.value());
  } else {
    for (Field f : sr1.valueSchema().fields()) {
      if (f.name().equals(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD)) {
        assertArrayEquals(((Struct)sr1.value()).getBytes(f.name()),
                          ((Struct)sr2.value()).getBytes(f.name()));
      } else {
        assertEquals(((Struct)sr1.value()).getString(f.name()),
                     ((Struct)sr2.value()).getString(f.name()));
      }
    }
  }
}
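The assertArrayEquals calls above are needed because Java arrays compare by reference: two distinct byte[] instances with identical contents are not equal under Object.equals. A minimal illustration:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

byte[] a = "payload".getBytes(StandardCharsets.UTF_8);
byte[] b = "payload".getBytes(StandardCharsets.UTF_8);
// a.equals(b) is false (reference identity), whereas
// Arrays.equals(a, b) is true (element-by-element comparison),
// which is what the helper relies on for BYTES_SCHEMA values.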
 
Example 8
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0
private SourceRecord createRecordWithHeaders(Map<String, String> messageAttributes, Map<String, String> ack,
                                             String key, byte[] messageBytes, Long timestamp) {
  ConnectHeaders headers = new ConnectHeaders();
  for (Entry<String, String> attribute : messageAttributes.entrySet()) {
    if (!attribute.getKey().equals(kafkaMessageKeyAttribute)) {
      headers.addString(attribute.getKey(), attribute.getValue());
    }
  }

  return new SourceRecord(
          null,
          ack,
          kafkaTopic,
          selectPartition(key, messageBytes),
          Schema.OPTIONAL_STRING_SCHEMA,
          key,
          Schema.BYTES_SCHEMA,
          messageBytes,
          timestamp,
          headers);
}
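ConnectHeaders (used above) is Connect's standard Headers implementation, and Headers is Iterable<Header>, so attributes forwarded this way can be read back downstream. A short sketch, with an illustrative header key and value:

import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Header;

ConnectHeaders headers = new ConnectHeaders();
headers.addString("trace-id", "abc-123"); // illustrative key/value
for (Header header : headers) {
    System.out.println(header.key() + " = " + header.value());
}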
 
Example 9
Source File: BytesToStringTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void bytes() throws UnsupportedEncodingException {
  this.transformation.configure(
      ImmutableMap.of()
  );
  final String expected = "this is a test";
  final SinkRecord inputRecord = new SinkRecord(
      "topic",
      1,
      null,
      null,
      Schema.BYTES_SCHEMA,
      expected.getBytes("UTF-8"),
      1L
  );

  SinkRecord outputRecord = this.transformation.apply(inputRecord);
  assertEquals(expected, outputRecord.value());
  assertSchema(Schema.STRING_SCHEMA, outputRecord.valueSchema());
}
 
Example 10
Source File: RedisSinkTaskTest.java    From kafka-connect-redis with Apache License 2.0
SinkRecord record(String k, String v) {
  final byte[] key = k.getBytes(Charsets.UTF_8);
  final Schema keySchema = Schema.BYTES_SCHEMA;
  final byte[] value;
  final Schema valueSchema;

  if (Strings.isNullOrEmpty(v)) {
    value = null;
    valueSchema = null;
  } else {
    value = v.getBytes(Charsets.UTF_8);
    valueSchema = Schema.BYTES_SCHEMA;
  }

  return new SinkRecord(
      "topic",
      1,
      keySchema,
      key,
      valueSchema,
      value,
      offset++
  );
}
 
Example 11
Source File: UnivocityFileReader.java    From kafka-connect-fs with Apache License 2.0
private Schema strToSchema(String dataType) {
    switch (DataType.valueOf(dataType.trim().toUpperCase())) {
        case BYTE:
            return dataTypeMappingError && !allowNulls ? Schema.INT8_SCHEMA : Schema.OPTIONAL_INT8_SCHEMA;
        case SHORT:
            return dataTypeMappingError && !allowNulls ? Schema.INT16_SCHEMA : Schema.OPTIONAL_INT16_SCHEMA;
        case INT:
            return dataTypeMappingError && !allowNulls ? Schema.INT32_SCHEMA : Schema.OPTIONAL_INT32_SCHEMA;
        case LONG:
            return dataTypeMappingError && !allowNulls ? Schema.INT64_SCHEMA : Schema.OPTIONAL_INT64_SCHEMA;
        case FLOAT:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT32_SCHEMA : Schema.OPTIONAL_FLOAT32_SCHEMA;
        case DOUBLE:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT64_SCHEMA : Schema.OPTIONAL_FLOAT64_SCHEMA;
        case BOOLEAN:
            return dataTypeMappingError && !allowNulls ? Schema.BOOLEAN_SCHEMA : Schema.OPTIONAL_BOOLEAN_SCHEMA;
        case BYTES:
            return dataTypeMappingError && !allowNulls ? Schema.BYTES_SCHEMA : Schema.OPTIONAL_BYTES_SCHEMA;
        case STRING:
        default:
            return dataTypeMappingError && !allowNulls ? Schema.STRING_SCHEMA : Schema.OPTIONAL_STRING_SCHEMA;
    }
}
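Each OPTIONAL_* constant above is a prebuilt nullable variant of the corresponding primitive schema. If a custom variant were ever needed, an equivalent schema could be assembled with SchemaBuilder; a sketch, not part of the original reader:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

// Hand-built equivalent of Schema.OPTIONAL_BYTES_SCHEMA:
Schema optionalBytes = SchemaBuilder.bytes().optional().build();
// optionalBytes.type() == Schema.Type.BYTES and optionalBytes.isOptional() == true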
 
Example 12
Source File: KafkaMonitor.java    From mirus with BSD 3-Clause "New" or "Revised" License
private String applyRoutersToTopic(String topic) {
  TopicPartition topicPartition = new TopicPartition(topic, 0);
  Map<String, Object> sourcePartition = TopicPartitionSerDe.asMap(topicPartition);
  SourceRecord record =
      new SourceRecord(
          sourcePartition,
          null,
          topicPartition.topic(),
          topicPartition.partition(),
          Schema.BYTES_SCHEMA,
          null,
          Schema.OPTIONAL_BYTES_SCHEMA,
          null);
  for (Transformation<SourceRecord> transform : this.routers) {
    record = transform.apply(record);
  }
  return record.topic();
}
 
Example 13
Source File: GcsSinkTaskTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createRecord(final String topic,
                                final int partition,
                                final String key,
                                final String value,
                                final int offset,
                                final long timestamp) {
    return new SinkRecord(
        topic,
        partition,
        Schema.BYTES_SCHEMA,
        key.getBytes(StandardCharsets.UTF_8),
        Schema.BYTES_SCHEMA,
        value.getBytes(StandardCharsets.UTF_8),
        offset,
        timestamp,
        TimestampType.CREATE_TIME);
}
 
Example 14
Source File: GcsSinkTaskTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private SinkRecord createNullRecord(final String topic,
                                    final int partition,
                                    final int offset) {
    return new SinkRecord(
        topic,
        partition,
        Schema.BYTES_SCHEMA,
        null,
        Schema.BYTES_SCHEMA,
        null,
        offset,
        null,
        TimestampType.NO_TIMESTAMP_TYPE);
}
 
Example 15
Source File: DumbProcessor.java    From kafka-connect-mqtt with MIT License
@Override
public SourceRecord[] getRecords(String kafkaTopic) {
    return new SourceRecord[]{new SourceRecord(null, null, kafkaTopic, null,
            Schema.STRING_SCHEMA, mTopic,
            Schema.BYTES_SCHEMA, mMessage.getPayload())};
}
 
Example 16
Source File: BytesFieldConverter.java    From kafka-connect-mongodb with Apache License 2.0
public BytesFieldConverter() {
    super(Schema.BYTES_SCHEMA);
}
 
Example 17
Source File: ByteArrayConverter.java    From mirus with BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
  return new SchemaAndValue(Schema.BYTES_SCHEMA, value);
}
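A hedged usage sketch for the method above (the topic name and payload are illustrative): the converter always pairs the raw bytes with the non-optional Schema.BYTES_SCHEMA, as the one-line body shows.

ByteArrayConverter converter = new ByteArrayConverter();
SchemaAndValue connectData = converter.toConnectData("my-topic", new byte[] {1, 2, 3});
// connectData.schema() == Schema.BYTES_SCHEMA; connectData.value() is the byte[] passed in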
 
Example 18
Source File: BytesFieldConverter.java    From mongo-kafka with Apache License 2.0
public BytesFieldConverter() {
  super(Schema.BYTES_SCHEMA);
}
 
Example 19
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "round_robin". The
 * test makes sure to submit an appropriate number of messages to poll() so that all partitions
 * in the round robin are hit at least once.
 */
@Test
public void testPollWithPartitionSchemeRoundRobin() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm3 = createReceivedMessage(ACK_ID3, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm4 = createReceivedMessage(ACK_ID4, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, rm1)
          .addReceivedMessages(1, rm2)
          .addReceivedMessages(2, rm3)
          .addReceivedMessages(3, rm4)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(4, result.size());
  SourceRecord expected1 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected2 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          1,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected3 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          2,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected4 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected1, result.get(0));
  assertRecordsEqual(expected2, result.get(1));
  assertRecordsEqual(expected3, result.get(2));
  assertRecordsEqual(expected4, result.get(3));
}
 
Example 20
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "hash_key". The test
 * has two cases, one where a key does exist and one where it does not.
 */
@Test
public void testPollWithPartitionSchemeHashKey() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_KEY.toString());
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage withoutKey = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage withKey = createReceivedMessage(ACK_ID2, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, withKey)
          .addReceivedMessages(1, withoutKey)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(2, result.size());
  SourceRecord expectedForMessageWithKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expectedForMessageWithoutKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);

  assertRecordsEqual(expectedForMessageWithKey, result.get(0));
  assertArrayEquals((byte[])expectedForMessageWithoutKey.value(), (byte[])result.get(1).value());
}