Java Code Examples for org.apache.kafka.connect.sink.SinkRecord#topic()

The following examples show how to use org.apache.kafka.connect.sink.SinkRecord#topic(). Each example is taken from an open-source project; the source file and project it comes from are listed above the code.
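
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern most of them share: combining topic() with kafkaPartition() and kafkaOffset() to identify the partition a record was read from and to build a unique per-record coordinate. The class and method names are illustrative.

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;

// Illustrative helper; the class and method names are hypothetical, not part of Kafka Connect.
final class SinkRecordCoordinates {

    // topic() plus kafkaPartition() identify the partition the record was read from.
    static TopicPartition topicPartitionOf(SinkRecord record) {
        return new TopicPartition(record.topic(), record.kafkaPartition());
    }

    // topic-partition-offset uniquely identifies a record within a cluster, which is
    // why several sinks below use it as a document ID or message ID.
    static String coordinateOf(SinkRecord record) {
        return record.topic() + "-" + record.kafkaPartition() + "-" + record.kafkaOffset();
    }
}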
Example 1
Source File: PlainPayload.java    From kafka-connect-lambda with Apache License 2.0
public PlainPayload(final SinkRecord record) {
    this.key = record.key() == null ? "" : record.key().toString();
    if (record.keySchema() != null)
        this.keySchemaName = record.keySchema().name();

    this.value = record.value() == null ? "" : record.value().toString();
    if (record.valueSchema() != null)
        this.valueSchemaName = record.valueSchema().name();

    this.topic = record.topic();
    this.partition = record.kafkaPartition();
    this.offset = record.kafkaOffset();

    if (record.timestamp() != null)
        this.timestamp = record.timestamp();
    if (record.timestampType() != null)
        this.timestampTypeName = record.timestampType().name;
}
 
Example 2
Source File: CouchbaseSinkTask.java    From kafka-connect-couchbase with Apache License 2.0
private static String documentIdFromKafkaMetadata(SinkRecord record) {
  Object key = record.key();

  if (key instanceof String
      || key instanceof Number
      || key instanceof Boolean) {
    return key.toString();
  }

  if (key instanceof byte[]) {
    return new String((byte[]) key, UTF_8);
  }

  if (key instanceof ByteBuffer) {
    return toString((ByteBuffer) key);
  }

  return record.topic() + "/" + record.kafkaPartition() + "/" + record.kafkaOffset();
}
 
Example 3
Source File: MongoDbSinkTask.java    From MongoDb-Sink-Connector with Apache License 2.0
@Override
public void put(Collection<SinkRecord> sinkRecords) {
    if (writer == null) {
        return;
    }

    putTimer.start();

    for (SinkRecord record : sinkRecords) {
        TopicPartition partition = new TopicPartition(record.topic(),
                record.kafkaPartition());
        latestOffsetPut.put(partition, record.kafkaOffset());
        buffer.add(record);
        monitor.increment();

        if (log.isDebugEnabled()) {
            log.debug("{} --> {}", partition, record.kafkaOffset());
        }
    }

    putTimer.stop();
}
 
Example 4
Source File: ToPutFunction.java    From kafka-connect-hbase with Apache License 2.0
/**
 * Converts the sinkRecord to a {@link Put} instance.
 * The event parser parses the key schema of the sinkRecord only when no value is
 * configured for {@link HBaseSinkConfig#TABLE_ROWKEY_COLUMNS_TEMPLATE}.
 *
 * @param sinkRecord the record to convert; must not be null
 * @return the HBase {@link Put} built from the record
 */
@Override
public Put apply(final SinkRecord sinkRecord) {
    Preconditions.checkNotNull(sinkRecord);
    final String table = sinkRecord.topic();
    final String columnFamily = columnFamily(table);
    final String delimiter = rowkeyDelimiter(table);

    final Map<String, byte[]> valuesMap  = this.eventParser.parseValue(sinkRecord);
    final Map<String, byte[]> keysMap = this.eventParser.parseKey(sinkRecord);

    valuesMap.putAll(keysMap);
    final String[] rowkeyColumns = rowkeyColumns(table);
    final byte[] rowkey = toRowKey(valuesMap, rowkeyColumns, delimiter);

    final Put put = new Put(rowkey);
    valuesMap.entrySet().stream().forEach(entry -> {
        final String qualifier = entry.getKey();
        final byte[] value = entry.getValue();
        put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), value);
    });
    return put;
}
 
Example 5
Source File: SchemaUtils.java    From streamx with Apache License 2.0
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
  switch (compatibility) {
    case BACKWARD:
    case FULL:
    case FORWARD:
      Schema sourceSchema = record.valueSchema();
      Object value = record.value();
      if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
        return record;
      }
      Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
      return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                            record.key(), currentSchema, projected, record.kafkaOffset());
    default:
      return record;
  }
}
 
Example 6
Source File: BackupSinkTask.java    From kafka-backup with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord sinkRecord : records) {
            TopicPartition topicPartition = new TopicPartition(sinkRecord.topic(), sinkRecord.kafkaPartition());
            PartitionWriter partition = partitionWriters.get(topicPartition);
            partition.append(Record.fromSinkRecord(sinkRecord));
            if (sinkRecord.kafkaOffset() % 100 == 0) {
                log.debug("Backed up Topic {}, Partition {}, up to offset {}", sinkRecord.topic(), sinkRecord.kafkaPartition(), sinkRecord.kafkaOffset());
            }
            if (config.snapShotMode()) {
                currentOffsets.put(topicPartition, sinkRecord.kafkaOffset());
            }
        }

        // Todo: refactor to own worker. E.g. using the scheduler of MM2
        offsetSink.syncConsumerGroups();
        offsetSink.syncOffsets();

        if (config.snapShotMode()) {
            terminateIfCompleted();
        }
    } catch (IOException | SegmentIndex.IndexException | PartitionIndex.IndexException | SegmentWriter.SegmentException e) {
        throw new RuntimeException(e);
    }
}
 
Example 7
Source File: TopicPartitionRecordGrouper.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
@Override
public void put(final SinkRecord record) {
    Objects.requireNonNull(record, "record cannot be null");

    final TopicPartition tp = new TopicPartition(record.topic(), record.kafkaPartition());
    final SinkRecord currentHeadRecord = currentHeadRecords.computeIfAbsent(tp, ignored -> record);
    final String recordKey = generateRecordKey(tp, currentHeadRecord);

    if (shouldCreateNewFile(recordKey)) {
        // Create new file using this record as the head record.
        currentHeadRecords.put(tp, record);
        final String newRecordKey = generateRecordKey(tp, record);
        fileBuffers.computeIfAbsent(newRecordKey, ignored -> new ArrayList<>()).add(record);
    } else {
        fileBuffers.computeIfAbsent(recordKey, ignored -> new ArrayList<>()).add(record);
    }
}
 
Example 8
Source File: Record.java    From kafka-backup with Apache License 2.0
public static Record fromSinkRecord(SinkRecord sinkRecord) {
    byte[] key = connectDataToBytes(sinkRecord.keySchema(), sinkRecord.key());
    byte[] value = connectDataToBytes(sinkRecord.valueSchema(), sinkRecord.value());
    RecordHeaders recordHeaders = new RecordHeaders();
    for (org.apache.kafka.connect.header.Header connectHeader : sinkRecord.headers()) {
        byte[] headerValue = connectDataToBytes(connectHeader.schema(), connectHeader.value());
        recordHeaders.add(connectHeader.key(), headerValue);
    }
    return new Record(sinkRecord.topic(), sinkRecord.kafkaPartition(), key, value, sinkRecord.kafkaOffset(), sinkRecord.timestamp(), sinkRecord.timestampType(), recordHeaders);
}
 
Example 9
Source File: SqsSinkConnectorTask.java    From kafka-connect-sqs with Apache License 2.0
@Override
public void put( Collection<SinkRecord> records ) {
  if ( records.isEmpty() ) {
    return ;
  }

  if ( !isValidState() ) {
    throw new IllegalStateException( "Task is not properly initialized" ) ;
  }

  log.debug( ".put:record_count={}", records.size() ) ;
  for ( final SinkRecord record : records ) {
    final String mid = MessageFormat.format( "{0}-{1}-{2}", record.topic(), record.kafkaPartition().longValue(),
        record.kafkaOffset() ) ;
    final String key = Facility.isNotNull( record.key() ) ? record.key().toString() : null ;
    final String gid = Facility.isNotNullNorEmpty( key ) ? key : record.topic() ;
    final String body = Facility.isNotNull( record.value() ) ? record.value().toString() : "" ;

    if ( Facility.isNotNullNorEmpty( body ) ) {
      try {
        final String sid = client.send( config.getQueueUrl(), body, gid, mid ) ;

        log.debug( ".put.OK:message-id={}, queue.url={}, sqs-group-id={}, sqs-message-id={}", gid, mid,
            config.getQueueUrl(), sid ) ;
      } catch ( final RuntimeException e ) {
        log.error( "An Exception occurred while sending message {} to target url {}:", mid, config.getQueueUrl(),
            e ) ;
      }
    } else {
      log.warn( "Skipping empty message: key={}", key ) ;
    }

  }
}
 
Example 10
Source File: JsonPayloadConverter.java    From kafka-connect-aws-lambda with Apache License 2.0
public String convert(SinkRecord record) throws JsonProcessingException {
  String topic = record.topic();
  Schema schema = record.valueSchema();
  Object value = record.value();

  String payload = objectMapper.writeValueAsString(
    jsonDeserializer.deserialize(topic,
      jsonConverter.fromConnectData(topic, schema, value)));

  if (log.isTraceEnabled()) {
    log.trace("P: {}", payload);
  }

  return payload;
}
 
Example 11
Source File: KafkaMetaDataStrategy.java    From kafka-connect-mongodb with Apache License 2.0
@Override
public BsonValue generateId(SinkDocument doc, SinkRecord orig) {

   return new BsonString(orig.topic()
                    + DELIMITER + orig.kafkaPartition()
                    + DELIMITER + orig.kafkaOffset());

}
 
Example 12
Source File: MongodbSinkTask.java    From kafka-connect-mongodb with Apache License 2.0
/**
 * Put the records in the sink.
 *
 * @param collection the set of records to send.
 */
@Override
public void put(Collection<SinkRecord> collection) {
    List<SinkRecord> records = new ArrayList<>(collection);
    for (int i = 0; i < records.size(); i++) {
        Map<String, List<WriteModel<Document>>> bulks = new HashMap<>();

        for (int j = 0; j < bulkSize && i < records.size(); j++, i++) {
            SinkRecord record = records.get(i);
            Map<String, Object> jsonMap = SchemaUtils.toJsonMap((Struct) record.value());
            String topic = record.topic();

            if (bulks.get(topic) == null) {
                bulks.put(topic, new ArrayList<WriteModel<Document>>());
            }

            Document newDocument = new Document(jsonMap)
                    .append("_id", record.kafkaOffset());

            log.trace("Adding to bulk: {}", newDocument.toString());
            bulks.get(topic).add(new UpdateOneModel<Document>(
                    Filters.eq("_id", record.kafkaOffset()),
                    new Document("$set", newDocument),
                    new UpdateOptions().upsert(true)));
        }
        i--;
        log.trace("Executing bulk");
        for (String key : bulks.keySet()) {
            try {
                com.mongodb.bulk.BulkWriteResult result = mapping.get(key).bulkWrite(bulks.get(key));
            } catch (Exception e) {
                log.error(e.getMessage());
            }
        }
    }
}
 
Example 13
Source File: RecordConverterFactory.java    From MongoDb-Sink-Connector with Apache License 2.0
/**
 * Generate a converter from given record.
 *
 * <p>By default, this returns generic datatype converters. Override to return specific
 * topic-based converters.
 *
 * @param record record to convert
 * @return {@link RecordConverter} capable of converting that record
 * @throws DataException if no suitable {@link RecordConverter} was found.
 */
public RecordConverter getRecordConverter(SinkRecord record)
        throws DataException {

    for (String option : generateNameOptions(record)) {
        RecordConverter converter = genericConverterMap.get(option);
        if (converter != null) {
            return converter;
        }
    }

    throw new DataException("Cannot find a suitable RecordConverter class "
            + "for record with schema " + record.valueSchema().name()
            + " in topic " + record.topic());
}
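
The javadoc above suggests overriding the factory to return topic-specific converters. Below is a hypothetical sketch of such an override, dispatching on record.topic() before falling back to the generic lookup; it assumes RecordConverterFactory has a no-argument constructor, and TopicAwareConverterFactory with its constructor arguments are made-up names.

// Hypothetical subclass; only the getRecordConverter(SinkRecord) signature shown above is assumed.
public class TopicAwareConverterFactory extends RecordConverterFactory {
    private final String specialTopic;
    private final RecordConverter specialConverter;

    public TopicAwareConverterFactory(String specialTopic, RecordConverter specialConverter) {
        this.specialTopic = specialTopic;
        this.specialConverter = specialConverter;
    }

    @Override
    public RecordConverter getRecordConverter(SinkRecord record) throws DataException {
        // Route one configured topic to a dedicated converter; otherwise fall back to
        // the generic converters resolved by the base factory.
        if (specialTopic.equals(record.topic())) {
            return specialConverter;
        }
        return super.getRecordConverter(record);
    }
}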
 
Example 14
Source File: Payload.java    From kafka-connect-lambda with Apache License 2.0
public Payload(final SinkRecord record) {
  if (record.keySchema() != null) {
    this.keySchemaName = record.keySchema().name();
    if (record.keySchema().version() != null ) {
      this.keySchemaVersion = record.keySchema().version().toString();
    }
  }

  if (record.valueSchema() != null) {
    this.valueSchemaName = record.valueSchema().name();
    if (record.valueSchema().version() != null ) {
      this.valueSchemaVersion = record.valueSchema().version().toString();
    }
  }

  this.topic = record.topic();
  this.partition = record.kafkaPartition();
  this.offset = record.kafkaOffset();

  if (record.timestamp() != null) {
    this.timestamp = record.timestamp();
  }
  if (record.timestampType() != null) {
    this.timestampTypeName = record.timestampType().name;
  }

}
 
Example 15
Source File: KafkaMetaDataStrategy.java    From mongo-kafka with Apache License 2.0
@Override
public BsonValue generateId(final SinkDocument doc, final SinkRecord orig) {
  return new BsonString(
      orig.topic() + DELIMITER + orig.kafkaPartition() + DELIMITER + orig.kafkaOffset());
}
 
Example 16
Source File: JsonRecordParser.java    From kafka-connect-zeebe with Apache License 2.0
private String generateId(final SinkRecord record) {
  return record.topic() + ":" + record.kafkaPartition() + ":" + record.kafkaOffset();
}
 
Example 17
Source File: GcsSinkTaskGroupByTopicPartitionPropertiesTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private String createFilename(final SinkRecord record) {
    return PREFIX + record.topic() + "-" + record.kafkaPartition() + "-" + record.kafkaOffset();
}
 
Example 18
Source File: MongoDbWriter.java    From MongoDb-Sink-Connector with Apache License 2.0
KafkaDocument(SinkRecord record) {
    this.record = record;
    partition = new TopicPartition(record.topic(), record.kafkaPartition());
    offset = record.kafkaOffset();
}
 
Example 19
Source File: JsonSinkClickHouseTask.java    From kafka-connectors with Apache License 2.0
/**
 * Parses each JSON record and wraps the table metadata and row data into a ClickHouseSinkData object.
 */
private ClickHouseSinkData handlerRecord(ObjectMapper mapper, Map<String, Map<String, String>> tableColumns, SinkRecord record) throws IOException, SQLException, ParseException {
    JsonNode jsonNode = mapper.readTree((String) record.value());
    String topic = record.topic();
    ClickHouseTableInfo clickHouseTableInfo = sinkTableMap.get(topic);
    Map<String, String> ckTabColumns = tableColumns.get(clickHouseTableInfo.getTable());
    if (ckTabColumns == null) {
        ckTabColumns = dataSource.descTableColType(String.format("`%s`.`%s`", sinkDb, clickHouseTableInfo.getTable()));
        tableColumns.put(clickHouseTableInfo.getTable(), ckTabColumns);
    }
    ClickHouseSinkData clickHouseSinkData = new ClickHouseSinkData();
    List<String> columns = new ArrayList<>();
    List<Object> values = new ArrayList<>();
    Iterator<String> keys = jsonNode.fieldNames();
    while (keys.hasNext()) {
        String fieldName = keys.next();
        String colType = ckTabColumns.get(fieldName);
        if (colType != null) {
            columns.add("`" + fieldName + "`");
            JsonNode nodeValue = jsonNode.get(fieldName);
            if (ClickHouseTypeConvert.isNumberType(colType)) {
                values.add(nodeValue.asText());
            } else if (nodeValue instanceof ContainerNode) {
                values.add(String.format("'%s'", nodeValue.toString().replaceAll("'", "\\\\'")));
            } else {
                values.add(String.format("'%s'", nodeValue.asText().replaceAll("'", "\\\\'")));
            }
        } else {
            throw new ConfigException(String.format("topic: %s, column: %s does not exist in ClickHouse table: %s; columns in that table: %s",
                    topic, fieldName, clickHouseTableInfo.getTable(), StringUtils.join(ckTabColumns.keySet(), ", ")));
        }
    }
    columns.add("`" + clickHouseTableInfo.getSinkDateCol() + "`");
    if (StringUtils.isNotEmpty(clickHouseTableInfo.getSourceDateCol())) {
        JsonNode sourceDateNode = jsonNode.get(clickHouseTableInfo.getSourceDateCol());
        if (sourceDateNode != null) {
            values.add(String.format("'%s'", dateFormat.format(clickHouseTableInfo.getDf().parse(sourceDateNode.asText()))));
        } else {
            values.add(String.format("'%s'", dateFormat.format(System.currentTimeMillis())));
        }
    } else {
        values.add(String.format("'%s'", dateFormat.format(System.currentTimeMillis())));
    }
    clickHouseSinkData.setTable(clickHouseTableInfo.getTable());
    clickHouseSinkData.setLocalTable(clickHouseTableInfo.getLocalTable());
    clickHouseSinkData.setColumns(StringUtils.join(columns, ", "));
    clickHouseSinkData.putValue(String.format("(%s)", StringUtils.join(values, ", ")));
    return clickHouseSinkData;
}