Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#value()

The following examples show how to use org.apache.kafka.connect.sink.SinkRecord#value(). Each example is taken from an open-source project; the source file and license are noted above the code.
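Before the project-specific examples, here is a minimal sketch of the common pattern around SinkRecord#value(): check for a tombstone (null value), then branch on whether the value is schema-ful (a Struct) or schemaless. The class and comments are illustrative only and are not taken from any of the projects below.

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Illustrative only: the usual dispatch on record.value() inside put().
public abstract class ExampleSinkTask extends SinkTask {
    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            Object value = record.value();
            if (value == null) {
                // Tombstone: many sinks interpret this as "delete the keyed document".
                continue;
            }
            if (value instanceof Struct) {
                // Schema-ful data (e.g. produced by the Avro or JSON-with-schema converters).
            } else if (value instanceof Map) {
                // Schemaless data, typically a Map from the schemaless JSON converter.
            } else {
                // Anything else (String, byte[], ...) depending on the configured converter.
            }
        }
    }
}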
Example 1
Source File: PlainPayload.java    From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
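A hypothetical call site for the payload above (the Jackson usage is an assumption for illustration, not the connector's actual code, and it presumes PlainPayload exposes getters for its fields):

// Hypothetical usage: one payload per record, serialized to JSON before the
// Lambda invocation. Assumes PlainPayload has getters Jackson can see.
ObjectMapper mapper = new ObjectMapper();
for (SinkRecord record : records) {
    String json = mapper.writeValueAsString(new PlainPayload(record));
    // hand `json` to the Lambda client ...
}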
 
Example 2
Source File: CouchbaseSinkTask.java    From kafka-connect-couchbase with Apache License 2.0
/**
 * Converts Kafka records to documents and indexes them by document ID.
 * <p>
 * If there are duplicate document IDs, ignores all but the last. This
 * prevents a stale version of the document from "winning" by being the
 * last one written to Couchbase.
 *
 * @return a map where the key is the ID of a document, and the value is the document.
 * A null value indicates the document should be deleted.
 */
private Map<String, SinkRecordAndDocument> toJsonBinaryDocuments(java.util.Collection<SinkRecord> records) {
  Map<String, SinkRecordAndDocument> idToSourceRecordAndDocument = new HashMap<>();
  for (SinkRecord record : records) {
    if (record.value() == null) {
      String documentId = documentIdFromKafkaMetadata(record);
      idToSourceRecordAndDocument.put(documentId, new SinkRecordAndDocument(record, null));
      continue;
    }

    JsonBinaryDocument doc = convert(record);
    idToSourceRecordAndDocument.put(doc.id(), new SinkRecordAndDocument(record, doc));
  }

  int deduplicatedRecords = records.size() - idToSourceRecordAndDocument.size();
  if (deduplicatedRecords != 0) {
    LOGGER.debug("Batch contained {} redundant Kafka records.", deduplicatedRecords);
  }

  return idToSourceRecordAndDocument;
}
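The tombstone branch above delegates to documentIdFromKafkaMetadata. A minimal sketch of what such a helper could look like (an assumption about its behavior, not the connector's actual implementation) is to build a synthetic ID from the record coordinates:

// Assumption: derive a document ID from topic/partition/offset so that a
// tombstone (null value) can still address the document it should delete.
private static String documentIdFromKafkaMetadata(SinkRecord record) {
  return record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset();
}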
 
Example 3
Source File: SchemaUtils.java    From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
  switch (compatibility) {
    case BACKWARD:
    case FULL:
    case FORWARD:
      Schema sourceSchema = record.valueSchema();
      Object value = record.value();
      if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
        return record;
      }
      Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
      return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                            record.key(), currentSchema, projected, record.kafkaOffset());
    default:
      return record;
  }
}
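An illustrative call site (not from the project) that projects each incoming record onto the schema the writer currently uses; note that Compatibility.NONE falls through to the default case and returns the record unchanged, and that the projected SinkRecord is rebuilt with the same topic, partition, key, and offset:

// Illustrative usage: align records with the writer's current value schema.
// `currentValueSchema` and `writer` are hypothetical names.
for (SinkRecord record : records) {
    SinkRecord projected = SchemaUtils.project(record, currentValueSchema, Compatibility.BACKWARD);
    writer.write(projected);
}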
 
Example 4
Source File: TimestampNowFieldTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void mapFieldMissing() {
  final Map<String, Object> expected = ImmutableMap.of(
      "firstName", "example", "lastName", "user", "timestamp", timestamp
  );
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      null,
      null,
      ImmutableMap.of("firstName", "example", "lastName", "user"),
      1234L
  );
  final SinkRecord output = this.transformation.apply(input);
  assertNotNull(output, "output should not be null.");
  assertTrue(output.value() instanceof Map, "value should be a map");
  final Map<String, Object> actual = (Map<String, Object>) output.value();
  assertEquals(expected, actual);
}
 
Example 5
Source File: DataConverter.java    From jkes with Apache License 2.0
public static DeletableRecord convertRecord(SinkRecord record, boolean ignoreSchema, String versionType) {
  final Schema schema;
  final Object value;
  if (!ignoreSchema) {
    schema = preProcessSchema(record.valueSchema());
    value = preProcessValue(record.value(), record.valueSchema(), schema);
  } else {
    schema = record.valueSchema();
    value = record.value();
  }

  final String payload = new String(JSON_CONVERTER.fromConnectData(record.topic(), schema, value), StandardCharsets.UTF_8);

  if (StringUtils.isNotBlank(payload)) {
    DeleteEvent deleteEvent = GSON.fromJson(payload, DeleteEvent.class);
    return new DeletableRecord(new Key(deleteEvent.getIndex(), deleteEvent.getType(), deleteEvent.getId()), deleteEvent.getVersion(), versionType);
  } else {
    return null;
  }

}
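The Gson call above parses the payload into a DeleteEvent. A minimal sketch of the shape that parse requires (field names are inferred from the getters used above; this is an assumption, not the project's actual class):

// Assumed shape of DeleteEvent, matching getIndex/getType/getId/getVersion above.
class DeleteEvent {
    private String index;
    private String type;
    private String id;
    private Long version;

    public String getIndex() { return index; }
    public String getType() { return type; }
    public String getId() { return id; }
    public Long getVersion() { return version; }
}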
 
Example 6
Source File: SinkConverter.java    From kafka-connect-mongodb with Apache License 2.0
public SinkDocument convert(SinkRecord record) {

    logger.debug(record.toString());

    BsonDocument keyDoc = null;
    if (record.key() != null) {
        keyDoc = getRecordConverter(record.key(), record.keySchema())
                .convert(record.keySchema(), record.key());
    }

    BsonDocument valueDoc = null;
    if (record.value() != null) {
        valueDoc = getRecordConverter(record.value(), record.valueSchema())
                .convert(record.valueSchema(), record.value());
    }

    return new SinkDocument(keyDoc, valueDoc);
}
 
Example 7
Source File: SinkConverter.java    From mongo-kafka with Apache License 2.0
public SinkDocument convert(final SinkRecord record) {
  LOGGER.debug(record.toString());

  BsonDocument keyDoc = null;
  if (record.key() != null) {
    keyDoc =
        new LazyBsonDocument(
            () ->
                getRecordConverter(record.key(), record.keySchema())
                    .convert(record.keySchema(), record.key()));
  }

  BsonDocument valueDoc = null;
  if (record.value() != null) {
    valueDoc =
        new LazyBsonDocument(
            () ->
                getRecordConverter(record.value(), record.valueSchema())
                    .convert(record.valueSchema(), record.value()));
  }

  return new SinkDocument(keyDoc, valueDoc);
}
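Compared with Example 6, this version wraps the conversion in suppliers so the BSON is only produced if the document body is actually read. A generic sketch of that defer-and-memoize idea (purely illustrative; this is not the connector's LazyBsonDocument):

import java.util.function.Supplier;

// Illustration of deferred, compute-once conversion.
final class Lazy<T> {
    private final Supplier<T> supplier;
    private T value;

    Lazy(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    synchronized T get() {
        if (value == null) {
            value = supplier.get();   // conversion runs only on first access
        }
        return value;
    }
}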
 
Example 8
Source File: AbstractValueWriter.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
/**
 * Takes the {@link SinkRecord}'s value as a byte array.
 *
 * <p>If the value is {@code null}, it outputs nothing.
 *
 * <p>If the value is not {@code null}, it assumes the value <b>is</b> a byte array.
 *
 * @param record       the record to get the value from
 * @param outputStream the stream to write to
 * @throws DataException when the value is not actually a byte array
 */
@Override
public void write(final SinkRecord record,
                  final OutputStream outputStream) throws IOException {
    Objects.requireNonNull(record, "record cannot be null");
    Objects.requireNonNull(record.valueSchema(), "value schema cannot be null");
    Objects.requireNonNull(outputStream, "outputStream cannot be null");

    if (record.valueSchema().type() != Schema.Type.BYTES) {
        final String msg = String.format("Record value schema type must be %s, %s given",
            Schema.Type.BYTES, record.valueSchema().type());
        throw new DataException(msg);
    }

    // Do nothing if the value is null.
    if (record.value() == null) {
        return;
    }

    if (!(record.value() instanceof byte[])) {
        throw new DataException("Value is not a byte array");
    }

    outputStream.write(getOutputBytes((byte[]) record.value()));
}
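A hypothetical helper in the same class (names are illustrative) showing how a batch's values could be buffered through the method above:

// Hypothetical helper: buffer a batch's raw value bytes using write() above.
// Null values contribute nothing; non-byte[] values raise a DataException.
private byte[] valuesAsBytes(final Collection<SinkRecord> records) throws IOException {
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (final SinkRecord record : records) {
        write(record, out);
    }
    return out.toByteArray();
}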
 
Example 9
Source File: TigerGraphSinkTask.java    From ecosys with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
    long parseStart = System.currentTimeMillis();
    if (records.isEmpty()) {
        return;
    }
    for (SinkRecord record: records) {
        Object value = record.value();
        if (value instanceof Struct) {
            log.debug("record.value() is Struct type");
            byte[] rawJson = converter.fromConnectData(
                    record.topic(),
                    record.valueSchema(),
                    value
            );
            ret.append(rawJson);
        } else if (value instanceof HashMap){
            log.debug("record.value() is HashMap type with gson.toJson(value), i.e.");
            log.debug(this.gson.toJson(value));
            ret.append(this.gson.toJson(value));
        } else {
            log.debug("record.value() is not a Struct or a HashMap type and write directly");
            ret.append(value);
        }
        ret.append(config.eol);
    }
    log.debug("url = " + config.GetNextUrl());
    log.debug("data = " + ret.toString());
    this.accumulated += records.size();
    this.parseTime += System.currentTimeMillis() - parseStart;
    commit();
}
 
Example 10
Source File: JsonPayloadConverter.java    From kafka-connect-aws-lambda with Apache License 2.0
public String convert(SinkRecord record) throws JsonProcessingException {
  String topic = record.topic();
  Schema schema = record.valueSchema();
  Object value = record.value();

  String payload = objectMapper.writeValueAsString(
    jsonDeserializer.deserialize(topic,
      jsonConverter.fromConnectData(topic, schema, value)));

  if (log.isTraceEnabled()) {
    log.trace("P: {}", payload);
  }

  return payload;
}
 
Example 11
Source File: PatternRenameTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void schemaLess() {
  this.transformation.configure(
      ImmutableMap.of(
          PatternRenameConfig.FIELD_PATTERN_CONF, "\\.",
          PatternRenameConfig.FIELD_REPLACEMENT_CONF, "_"
      )
  );

  final Map<String, Object> input = ImmutableMap.of(
      "first.name", "example",
      "last.name", "user"
  );
  final Map<String, Object> expected = ImmutableMap.of(
      "first_name", "example",
      "last_name", "user"
  );

  final Object key = isKey ? input : null;
  final Object value = isKey ? null : input;
  final Schema keySchema = null;
  final Schema valueSchema = null;

  final SinkRecord inputRecord = new SinkRecord(
      TOPIC,
      1,
      keySchema,
      key,
      valueSchema,
      value,
      1234L
  );
  final SinkRecord outputRecord = this.transformation.apply(inputRecord);
  assertNotNull(outputRecord);
  final Map<String, Object> actual = (Map<String, Object>) (isKey ? outputRecord.key() : outputRecord.value());
  assertMap(expected, actual, "");
}
 
Example 12
Source File: TimestampNowFieldTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void structFieldMissing() {
  final Schema inputSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .build();
  final Schema expectedSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("timestamp", Timestamp.SCHEMA)
      .build();
  final Struct inputStruct = new Struct(inputSchema)
      .put("firstName", "example")
      .put("lastName", "user");
  final Struct expectedStruct = new Struct(expectedSchema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("timestamp", timestamp);
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      null,
      inputSchema,
      inputStruct,
      1234L
  );
  final SinkRecord output = this.transformation.apply(input);
  assertNotNull(output, "output should not be null.");
  assertTrue(output.value() instanceof Struct, "value should be a struct");
  final Struct actualStruct = (Struct) output.value();
  assertStruct(expectedStruct, actualStruct);
}
 
Example 13
Source File: TimestampNowFieldTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void structFieldExists() {
  final Schema inputSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("timestamp", Timestamp.SCHEMA)
      .build();
  final Schema expectedSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("timestamp", Timestamp.SCHEMA)
      .build();
  final Struct inputStruct = new Struct(inputSchema)
      .put("firstName", "example")
      .put("lastName", "user");
  final Struct expectedStruct = new Struct(expectedSchema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("timestamp", timestamp);
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      null,
      inputSchema,
      inputStruct,
      1234L
  );
  final SinkRecord output = this.transformation.apply(input);
  assertNotNull(output, "output should not be null.");
  assertTrue(output.value() instanceof Struct, "value should be a struct");
  final Struct actualStruct = (Struct) output.value();
  assertStruct(expectedStruct, actualStruct);
}
 
Example 14
Source File: TimestampNowFieldTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void structFieldMismatch() {
  final Schema inputSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("timestamp", Schema.STRING_SCHEMA)
      .build();
  final Schema expectedSchema = SchemaBuilder.struct()
      .name("something")
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("timestamp", Timestamp.SCHEMA)
      .build();
  final Struct inputStruct = new Struct(inputSchema)
      .put("firstName", "example")
      .put("lastName", "user");
  final Struct expectedStruct = new Struct(expectedSchema)
      .put("firstName", "example")
      .put("lastName", "user")
      .put("timestamp", timestamp);
  final SinkRecord input = new SinkRecord(
      "test",
      1,
      null,
      null,
      inputSchema,
      inputStruct,
      1234L
  );
  final SinkRecord output = this.transformation.apply(input);
  assertNotNull(output, "output should not be null.");
  assertTrue(output.value() instanceof Struct, "value should be a struct");
  final Struct actualStruct = (Struct) output.value();
  assertStruct(expectedStruct, actualStruct);
}
 
Example 15
Source File: FieldPartitioner.java    From streamx with Apache License 2.0
@Override
public String encodePartition(SinkRecord sinkRecord) {
  Object value = sinkRecord.value();
  Schema valueSchema = sinkRecord.valueSchema();
  if (value instanceof Struct) {
    Struct struct = (Struct) value;
    Object partitionKey = struct.get(fieldName);
    Type type = valueSchema.field(fieldName).schema().type();
    switch (type) {
      case INT8:
      case INT16:
      case INT32:
      case INT64:
        Number record = (Number) partitionKey;
        return fieldName + "=" + record.toString();
      case STRING:
        return fieldName + "=" + (String) partitionKey;
      case BOOLEAN:
        boolean booleanRecord = (boolean) partitionKey;
        return fieldName + "=" + Boolean.toString(booleanRecord);
      default:
        log.error("Type {} is not supported as a partition key.", type.getName());
        throw new PartitionException("Error encoding partition.");
    }
  } else {
    log.error("Value is not Struct type.");
    throw new PartitionException("Error encoding partition.");
  }
}
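An illustrative record (not from the project's tests) showing the resulting partition string when the configured fieldName is "country"; the partitioner instance here is hypothetical:

// Illustrative: a Struct value with country = "DE" encodes to "country=DE".
Schema schema = SchemaBuilder.struct().field("country", Schema.STRING_SCHEMA).build();
Struct value = new Struct(schema).put("country", "DE");
SinkRecord record = new SinkRecord("events", 0, null, null, schema, value, 42L);
String partition = partitioner.encodePartition(record);   // "country=DE"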
 
Example 16
Source File: ObjectMapperFactory.java    From kafka-connect-splunk with Apache License 2.0
@Override
public void serialize(SinkRecord sinkRecord, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException, JsonProcessingException {
  Event event = new Event();
  event.event = sinkRecord.value();

  if (event.event instanceof Map) {
    handleMap(event);
  } else if (event.event instanceof Struct) {
    handleStruct(event);
  }

  // TODO: When we move to the next Kafka version, check for a null date and use the SinkRecord's timestamp.

  jsonGenerator.writeObject(event);
}
 
Example 17
Source File: JsonPayloadFormatter.java    From kafka-connect-lambda with Apache License 2.0
private Payload<Object, Object> recordToPayload(final SinkRecord record) {
  Object deserializedKey;
  Object deserializedValue;
  if (record.keySchema() == null) {
    deserializedKey = record.key();
  } else {
    deserializedKey = deserialize(keySchemaVisibility, record.topic(), record.keySchema(), record.key());
  }
  if (record.valueSchema() == null) {
    deserializedValue = record.value();
  } else {
    deserializedValue = deserialize(valueSchemaVisibility, record.topic(), record.valueSchema(), record.value());
  }

  Payload<Object, Object> payload = new Payload<>(record);
  payload.setKey(deserializedKey);
  payload.setValue(deserializedValue);
  if (keySchemaVisibility == SchemaVisibility.NONE) {
    payload.setKeySchemaName(null);
    payload.setKeySchemaVersion(null);
  }
  if (valueSchemaVisibility == SchemaVisibility.NONE) {
    payload.setValueSchemaName(null);
    payload.setValueSchemaVersion(null);
  }

  return payload;
}
 
Example 18
Source File: PatternRenameTest.java    From kafka-connect-transform-common with Apache License 2.0
@Test
public void prefixed() {
  this.transformation.configure(
      ImmutableMap.of(
          PatternRenameConfig.FIELD_PATTERN_CONF, "^prefixed",
          PatternRenameConfig.FIELD_REPLACEMENT_CONF, ""
      )
  );

  Schema inputSchema = SchemaBuilder.struct()
      .name("testing")
      .field("prefixedfirstname", Schema.STRING_SCHEMA)
      .field("prefixedlastname", Schema.STRING_SCHEMA);
  Struct inputStruct = new Struct(inputSchema)
      .put("prefixedfirstname", "example")
      .put("prefixedlastname", "user");

  final Object key = isKey ? inputStruct : null;
  final Object value = isKey ? null : inputStruct;
  final Schema keySchema = isKey ? inputSchema : null;
  final Schema valueSchema = isKey ? null : inputSchema;

  final SinkRecord inputRecord = new SinkRecord(
      TOPIC,
      1,
      keySchema,
      key,
      valueSchema,
      value,
      1234L
  );
  final SinkRecord outputRecord = this.transformation.apply(inputRecord);
  assertNotNull(outputRecord);

  final Schema actualSchema = isKey ? outputRecord.keySchema() : outputRecord.valueSchema();
  final Struct actualStruct = (Struct) (isKey ? outputRecord.key() : outputRecord.value());

  final Schema expectedSchema = SchemaBuilder.struct()
      .name("testing")
      .field("firstname", Schema.STRING_SCHEMA)
      .field("lastname", Schema.STRING_SCHEMA);
  Struct expectedStruct = new Struct(expectedSchema)
      .put("firstname", "example")
      .put("lastname", "user");

  assertSchema(expectedSchema, actualSchema);
  assertStruct(expectedStruct, actualStruct);
}
 
Example 19
Source File: RecordService.java    From snowflake-kafka-connector with Apache License 2.0
/**
 * Processes the given SinkRecord;
 * only Snowflake converters are supported.
 *
 * @param record SinkRecord
 * @return a record string, ready to output
 */
public String processRecord(SinkRecord record)
{
  if (!record.valueSchema().name().equals(SnowflakeJsonSchema.NAME))
  {
    throw SnowflakeErrors.ERROR_0009.getException();
  }
  if (!(record.value() instanceof SnowflakeRecordContent))
  {
    throw SnowflakeErrors.ERROR_0010
      .getException("Input record should be SnowflakeRecordContent object");
  }

  SnowflakeRecordContent valueContent = (SnowflakeRecordContent) record.value();

  ObjectNode meta = MAPPER.createObjectNode();
  if (metadataConfig.topicFlag)
  {
    meta.put(TOPIC, record.topic());
  }
  if (metadataConfig.offsetAndPartitionFlag)
  {
    meta.put(OFFSET, record.kafkaOffset());
    meta.put(PARTITION, record.kafkaPartition());
  }

  //ignore if no timestamp
  if (record.timestampType() != TimestampType.NO_TIMESTAMP_TYPE &&
    metadataConfig.createtimeFlag)
  {
    meta.put(record.timestampType().name, record.timestamp());
  }

  //include schema id if using avro with schema registry
  if (valueContent.getSchemaID() != SnowflakeRecordContent.NON_AVRO_SCHEMA)
  {
    meta.put(SCHEMA_ID, valueContent.getSchemaID());
  }

  putKey(record, meta);

  if (!record.headers().isEmpty())
  {
    meta.set(HEADERS, parseHeaders(record.headers()));
  }


  StringBuilder buffer = new StringBuilder();
  for (JsonNode node : valueContent.getData())
  {
    ObjectNode data = MAPPER.createObjectNode();
    data.set(CONTENT, node);
    if (metadataConfig.allFlag)
    {
      data.set(META, meta);
    }
    buffer.append(data.toString());
  }
  return buffer.toString();
}