Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#keySchema()

The following examples show how to use org.apache.kafka.connect.sink.SinkRecord#keySchema(). They are taken from open-source projects; the source file and project for each example are noted above it.
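Before the project examples, the core pattern in one place: keySchema() returns null whenever the key came through a schemaless converter, so callers should null-check before touching the schema. A minimal sketch (illustrative, not from any project below):

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

public static String describeKey(final SinkRecord record) {
    final Schema keySchema = record.keySchema();
    if (keySchema == null) {
        // Schemaless key, e.g. JsonConverter with schemas.enable=false
        return "schemaless key: " + record.key();
    }
    // name() may still be null for unnamed (primitive) schemas
    return "key of type " + keySchema.type() + ", schema name " + keySchema.name();
}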
Example 1
Source File: PlainPayload.java    From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
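A quick usage sketch with illustrative values: the keySchema()/valueSchema() null checks guard against schemaless records, and even when a schema is present, name() can still be null for unnamed primitive schemas.

SinkRecord record = new SinkRecord("orders", 0,
        Schema.STRING_SCHEMA, "order-1",
        Schema.STRING_SCHEMA, "{\"total\": 10}", 42L);
PlainPayload payload = new PlainPayload(record);
// keySchemaName stays null here: Schema.STRING_SCHEMA is an unnamed primitive schema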
 
Example 2
Source File: KafkaSinkTask.java    From common-kafka with Apache License 2.0
@Override
public void put(Collection<SinkRecord> collection) {
    // Any retriable exception thrown here will be attempted again and not cause the task to pause
    for (SinkRecord sinkRecord : collection) {
        if (sinkRecord.keySchema() != Schema.OPTIONAL_BYTES_SCHEMA || sinkRecord.valueSchema() != Schema.OPTIONAL_BYTES_SCHEMA)
            throw new IllegalStateException("Expected sink record key/value to be optional bytes, but saw instead key: "
                    + sinkRecord.keySchema() + " value: " + sinkRecord.valueSchema() + ". Must use converter: " +
                    "org.apache.kafka.connect.converters.ByteArrayConverter");

        LOGGER.debug("Sending record {}", sinkRecord);

        try {
            producer.send(new ProducerRecord<>(sinkRecord.topic(), sinkRecord.kafkaPartition(), (byte[]) sinkRecord.key(),
                    (byte[]) sinkRecord.value()));
        } catch (KafkaException e) {
            // If send throws an exception ensure we always retry the record/collection
            throw new RetriableException(e);
        }
    }
}
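The != against Schema.OPTIONAL_BYTES_SCHEMA is an identity comparison, which works because ByteArrayConverter always hands Connect that singleton schema. A small sketch of why the check passes (illustrative, not from the project):

ByteArrayConverter converter = new ByteArrayConverter();
SchemaAndValue connectData = converter.toConnectData("topic", new byte[] {1, 2, 3});
// connectData.schema() == Schema.OPTIONAL_BYTES_SCHEMA, so records built from this
// converter satisfy the identity check in put()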
 
Example 3
Source File: SchemaUtils.java    From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
  switch (compatibility) {
    // BACKWARD, FULL, and FORWARD all fall through to the same projection logic
    case BACKWARD:
    case FULL:
    case FORWARD:
      Schema sourceSchema = record.valueSchema();
      Object value = record.value();
      if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
        return record;
      }
      Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
      return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                            record.key(), currentSchema, projected, record.kafkaOffset());
    default:
      return record;
  }
}
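SchemaProjector (from the Connect data API) performs the field-by-field conversion; fields that exist only in the target schema must be optional or carry a default value, or projection fails. A minimal sketch with hypothetical schemas:

Schema v1 = SchemaBuilder.struct().name("user").version(1)
        .field("name", Schema.STRING_SCHEMA)
        .build();
Schema v2 = SchemaBuilder.struct().name("user").version(2)
        .field("name", Schema.STRING_SCHEMA)
        .field("age", SchemaBuilder.int32().optional().build())
        .build();
Struct oldValue = new Struct(v1).put("name", "alice");
Object projected = SchemaProjector.project(v1, oldValue, v2);
// projected is a Struct against v2 with name = "alice" and age left null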
 
Example 4
Source File: RecordConverterFactory.java    From MongoDb-Sink-Connector with Apache License 2.0
private String[] generateNameOptions(SinkRecord record) {
    String valueSchemaName = record.valueSchema().name();
    String valueSchemaType = record.valueSchema().type().getName();

    if (record.keySchema() != null) {
        String keySchemaName = record.keySchema().name();
        String keySchemaType = record.keySchema().type().getName();
        // Try the most specific name combinations first; null is the final fallback
        return new String[] {
                keySchemaName + "-" + valueSchemaName,
                keySchemaType + "-" + valueSchemaName,
                valueSchemaName,
                keySchemaName + "-" + valueSchemaType,
                keySchemaType + "-" + valueSchemaType,
                valueSchemaType,
                null,
        };
    } else {
        return new String[] {
                valueSchemaName,
                valueSchemaType,
                null,
        };
    }
}
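For concreteness, a hedged illustration of the fallback order this produces (schema names are hypothetical). Schema.Type#getName() is the lowercase type name, so a named string key and value schema yield:

Schema keySchema = SchemaBuilder.string().name("com.example.Key").build();
Schema valueSchema = SchemaBuilder.string().name("com.example.Value").build();
// Options are tried most specific first:
//   "com.example.Key-com.example.Value", "string-com.example.Value", "com.example.Value",
//   "com.example.Key-string", "string-string", "string",
//   and finally null, presumably matching a catch-all converter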
 
Example 5
Source File: Payload.java    From kafka-connect-lambda with Apache License 2.0
public Payload(final SinkRecord record) {
  if (record.keySchema() != null) {
    this.keySchemaName = record.keySchema().name();
    if (record.keySchema().version() != null) {
      this.keySchemaVersion = record.keySchema().version().toString();
    }
  }

  if (record.valueSchema() != null) {
    this.valueSchemaName = record.valueSchema().name();
    if (record.valueSchema().version() != null) {
      this.valueSchemaVersion = record.valueSchema().version().toString();
    }
  }

  this.topic = record.topic();
  this.partition = record.kafkaPartition();
  this.offset = record.kafkaOffset();

  if (record.timestamp() != null) {
    this.timestamp = record.timestamp();
  }
  if (record.timestampType() != null) {
    this.timestampTypeName = record.timestampType().name;
  }
}
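Schema versions are Integers set when the schema is built; a quick sketch of where keySchema().version() comes from (hypothetical schema):

Schema keySchema = SchemaBuilder.string().name("com.example.Key").version(3).build();
// keySchema.version() returns the Integer 3; Payload stores it as the String "3"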
 
Example 6
Source File: JsonPayloadFormatter.java    From kafka-connect-lambda with Apache License 2.0
private Payload<Object, Object> recordToPayload(final SinkRecord record) {
  Object deserializedKey;
  Object deserializedValue;
  if (record.keySchema() == null) {
    deserializedKey = record.key();
  } else {
    deserializedKey = deserialize(keySchemaVisibility, record.topic(), record.keySchema(), record.key());
  }
  if (record.valueSchema() == null) {
    deserializedValue = record.value();
  } else {
    deserializedValue = deserialize(valueSchemaVisibility, record.topic(), record.valueSchema(), record.value());
  }

  Payload<Object, Object> payload = new Payload<>(record);
  payload.setKey(deserializedKey);
  payload.setValue(deserializedValue);
  if (keySchemaVisibility == SchemaVisibility.NONE) {
    payload.setKeySchemaName(null);
    payload.setKeySchemaVersion(null);
  }
  if (valueSchemaVisibility == SchemaVisibility.NONE) {
    payload.setValueSchemaName(null);
    payload.setValueSchemaVersion(null);
  }

  return payload;
}
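The null branches mirror Connect's schemaless mode: with, say, JsonConverter configured with schemas.enable=false, keySchema() returns null and the raw key object passes straight through. A sketch of the two cases with illustrative records:

// Schemaless: keySchema() is null, so recordToPayload uses record.key() as-is
SinkRecord schemaless = new SinkRecord("events", 0, null, "raw-key", null, "raw-value", 0L);

// With schemas: keySchema() is non-null, so the key is routed through deserialize(...)
SinkRecord withSchema = new SinkRecord("events", 0,
        Schema.STRING_SCHEMA, "key", Schema.STRING_SCHEMA, "value", 0L);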
 
Example 7
Source File: PatternRenameTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void prefixed() {
  this.transformation.configure(
      ImmutableMap.of(
          PatternRenameConfig.FIELD_PATTERN_CONF, "^prefixed",
          PatternRenameConfig.FIELD_REPLACEMENT_CONF, ""
      )
  );

  Schema inputSchema = SchemaBuilder.struct()
      .name("testing")
      .field("prefixedfirstname", Schema.STRING_SCHEMA)
      .field("prefixedlastname", Schema.STRING_SCHEMA);
  Struct inputStruct = new Struct(inputSchema)
      .put("prefixedfirstname", "example")
      .put("prefixedlastname", "user");

  // Exercise the key side or the value side, depending on the test's isKey flag
  final Object key = isKey ? inputStruct : null;
  final Object value = isKey ? null : inputStruct;
  final Schema keySchema = isKey ? inputSchema : null;
  final Schema valueSchema = isKey ? null : inputSchema;

  final SinkRecord inputRecord = new SinkRecord(
      TOPIC,
      1,
      keySchema,
      key,
      valueSchema,
      value,
      1234L
  );
  final SinkRecord outputRecord = this.transformation.apply(inputRecord);
  assertNotNull(outputRecord);

  final Schema actualSchema = isKey ? outputRecord.keySchema() : outputRecord.valueSchema();
  final Struct actualStruct = (Struct) (isKey ? outputRecord.key() : outputRecord.value());

  final Schema expectedSchema = SchemaBuilder.struct()
      .name("testing")
      .field("firstname", Schema.STRING_SCHEMA)
      .field("lastname", Schema.STRING_SCHEMA);
  Struct expectedStruct = new Struct(expectedSchema)
      .put("firstname", "example")
      .put("lastname", "user");

  assertSchema(expectedSchema, actualSchema);
  assertStruct(expectedStruct, actualStruct);
}
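A possible follow-up assertion, not part of the original test: the side that isKey did not select went in as null and should come back out untouched.

final Schema untouchedSchema = isKey ? outputRecord.valueSchema() : outputRecord.keySchema();
final Object untouchedContent = isKey ? outputRecord.value() : outputRecord.key();
assertNull(untouchedSchema);
assertNull(untouchedContent);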