org.apache.kafka.connect.source.SourceRecord Java Examples

The following examples show how to use org.apache.kafka.connect.source.SourceRecord. They are drawn from open source projects; the source file, project, and license for each example are noted above its code.
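For orientation before the project examples, here is a minimal, self-contained sketch of constructing a SourceRecord directly. The topic name, partition/offset map keys, and key/value payloads are illustrative placeholders, not taken from any of the projects below.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class SourceRecordSketch {
    public static void main(String[] args) {
        // The source partition and offset are free-form maps that Kafka Connect persists
        // so a source task can resume from where it left off.
        Map<String, String> sourcePartition = Collections.singletonMap("file", "example.txt");
        Map<String, Long> sourceOffset = Collections.singletonMap("position", 42L);

        // Passing null for the Kafka partition lets the framework (or the record key) decide it.
        SourceRecord record = new SourceRecord(
                sourcePartition,
                sourceOffset,
                "example-topic",
                null,
                Schema.STRING_SCHEMA,
                "example-key",
                Schema.STRING_SCHEMA,
                "example-value");

        System.out.println(record);
    }
}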
Example #1
Source File: KafkaMonitor.java    From mirus with BSD 3-Clause "New" or "Revised" License
private List<Transformation<SourceRecord>> validateTransformations(
    List<Transformation<SourceRecord>> transformations) {
  List<Transformation<SourceRecord>> regexRouters = new ArrayList<>();

  // No need to validate transforms if we're not checking destination partitions
  if (this.topicCheckingEnabled) {
    for (Transformation<SourceRecord> transform : transformations) {
      String transformName = transform.getClass().getSimpleName();
      if (transform instanceof RegexRouter) {
        regexRouters.add(transform);
        // Slightly awkward check to see if any other routing transforms are configured
      } else if (transformName.contains("Router")) {
        throw new IllegalArgumentException(
            String.format(
                "Unsupported Router Transformation %s found."
                    + " To use it, please disable destination topic checking by setting 'enable.destination.topic.checking' to false.",
                transformName));
      } else {
        logger.debug("Ignoring non-routing Transformation {}", transformName);
      }
    }
  }
  return regexRouters;
}
 
Example #2
Source File: CamelSourceTaskTest.java    From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePolling() {
    final long size = 2;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size);
    List<SourceRecord> poll = sourceTask.poll();

    assertEquals(size, poll.size());
    assertEquals(TOPIC_NAME, poll.get(0).topic());
    assertEquals(LoggingLevel.OFF.toString(), sourceTask.getCamelSourceConnectorConfig(props)
        .getString(CamelSourceConnectorConfig.CAMEL_SOURCE_CONTENT_LOG_LEVEL_CONF));

    sourceTask.stop();
}
 
Example #3
Source File: CamelSourceTaskTest.java    From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePollingMaxBatchPollSize() {
    final long size = 2;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_MAX_BATCH_POLL_SIZE_CONF, String.valueOf(size));

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size + 1);
    List<SourceRecord> poll = sourceTask.poll();
    int pollSize = poll.size();

    assertTrue(pollSize >= 0 && pollSize <= size, "Batch size: " + pollSize + ", expected between 0 and " + size);
    sourceTask.stop();
}
 
Example #4
Source File: CamelSourceTaskTest.java    From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePollingTimeout() {
    final long size = 999;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_MAX_POLL_DURATION_CONF, "2");

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size);
    List<SourceRecord> poll = sourceTask.poll();
    int pollSize = poll.size();

    assertTrue(pollSize < size, "Batch size: " + pollSize + ", expected strictly less than " + size);
    sourceTask.stop();
}
 
Example #5
Source File: ZeebeSourceTask.java    From kafka-connect-zeebe with Apache License 2.0
private SourceRecord transformJob(final ActivatedJob job) {
  final String topic = topicExtractor.extract(job);
  final Map<String, Integer> sourcePartition =
      Collections.singletonMap("partitionId", decodePartitionId(job.getKey()));
  // a better sourceOffset would be the position but we don't have it here unfortunately
  // key is however a monotonically increasing value, so in a sense it can provide a good
  // approximation of an offset
  final Map<String, Long> sourceOffset = Collections.singletonMap("key", job.getKey());
  return new SourceRecord(
      sourcePartition,
      sourceOffset,
      topic,
      Schema.INT64_SCHEMA,
      job.getKey(),
      Schema.STRING_SCHEMA,
      job.toJson());
}
 
Example #6
Source File: CamelTypeConverterTransformTest.java    From camel-kafka-connector with Apache License 2.0
@Test
public void testIfItConvertsConnectRecordCorrectly() {
    final SourceRecord connectRecord = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "topic", Schema.STRING_SCHEMA, "1234", Schema.STRING_SCHEMA, "TRUE");

    final Map<String, Object> propsForKeySmt = new HashMap<>();
    propsForKeySmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, Integer.class.getName());

    final Map<String, Object> propsForValueSmt = new HashMap<>();
    propsForValueSmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, "java.lang.Boolean");

    final Transformation<SourceRecord> transformationKey = new CamelTypeConverterTransform.Key<>();
    final Transformation<SourceRecord> transformationValue = new CamelTypeConverterTransform.Value<>();

    transformationKey.configure(propsForKeySmt);
    transformationValue.configure(propsForValueSmt);

    final SourceRecord transformedKeySourceRecord = transformationKey.apply(connectRecord);
    final SourceRecord transformedValueSourceRecord = transformationValue.apply(connectRecord);

    assertEquals(1234, transformedKeySourceRecord.key());
    assertEquals(Schema.INT32_SCHEMA, transformedKeySourceRecord.keySchema());

    assertEquals(true, transformedValueSourceRecord.value());
    assertEquals(Schema.BOOLEAN_SCHEMA, transformedValueSourceRecord.valueSchema());
}
 
Example #7
Source File: AbstractKafkaConnectSource.java    From hazelcast-jet-contrib with Apache License 2.0
public void fillBuffer(SourceBuilder.TimestampedSourceBuffer<T> buf) {
    if (!taskInit) {
        task.initialize(new JetSourceTaskContext());
        task.start(taskConfig);
        taskInit = true;
    }
    try {
        List<SourceRecord> records = task.poll();
        if (records == null) {
            return;
        }

        for (SourceRecord record : records) {
            boolean added = addToBuffer(record, buf);
            if (added) {
                partitionsToOffset.put(record.sourcePartition(), record.sourceOffset());
            }
        }
    } catch (InterruptedException e) {
        throw rethrow(e);
    }
}
 
Example #8
Source File: SpoolDirLineDelimitedSourceTask.java    From kafka-connect-spooldir with Apache License 2.0
@Override
protected List<SourceRecord> process() throws IOException {
  int recordCount = 0;
  List<SourceRecord> records = new ArrayList<>(this.config.batchSize);
  String line = null;
  while (recordCount < this.config.batchSize && null != (line = this.reader.readLine())) {
    SourceRecord record = record(
        null,
        new SchemaAndValue(Schema.STRING_SCHEMA, line),
        null
    );
    records.add(record);
    recordCount++;
  }
  return records;
}
 
Example #9
Source File: MongodbSourceTask.java    From kafka-connect-mongodb with Apache License 2.0
/**
 * Poll this MongodbSourceTask for new records.
 *
 * @return a list of source records
 * @throws InterruptException
 */
@Override
public List<SourceRecord> poll() throws InterruptException {
    List<SourceRecord> records = new ArrayList<>();
    while (!reader.isEmpty()) {
        Document message = reader.pool();
        Struct messageStruct = getStruct(message);
        String topic = getTopic(message);
        String db = getDB(message);
        String timestamp = getTimestamp(message);
        records.add(new SourceRecord(Collections.singletonMap("mongodb", db), Collections.singletonMap(db, timestamp), topic, messageStruct.schema(), messageStruct));
        log.trace(message.toString());
    }


    return records;
}
 
Example #10
Source File: IgniteSourceTask.java    From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override public List<SourceRecord> poll() throws InterruptedException {
    ArrayList<SourceRecord> records = new ArrayList<>(evtBatchSize);
    ArrayList<CacheEvent> evts = new ArrayList<>(evtBatchSize);

    if (stopped)
        return records;

    try {
        if (evtBuf.drainTo(evts, evtBatchSize) > 0) {
            for (CacheEvent evt : evts) {
                // schema and keys are ignored.
                for (String topic : topics)
                    records.add(new SourceRecord(srcPartition, offset, topic, null, evt));
            }

            return records;
        }
    }
    catch (IgniteException e) {
        log.error("Error when polling event queue!", e);
    }

    // for shutdown.
    return null;
}
 
Example #11
Source File: CloudPubSubSourceTask.java    From pubsub with Apache License 2.0
private SourceRecord createRecordWithHeaders(Map<String, String> messageAttributes, Map<String,String> ack,
                                             String key, byte[] messageBytes, Long timestamp) {
  ConnectHeaders headers = new ConnectHeaders();
  for (Entry<String, String> attribute :
          messageAttributes.entrySet()) {
    if (!attribute.getKey().equals(kafkaMessageKeyAttribute)) {
      headers.addString(attribute.getKey(), attribute.getValue());
    }
  }

  return new SourceRecord(
          null,
          ack,
          kafkaTopic,
          selectPartition(key, messageBytes),
          Schema.OPTIONAL_STRING_SCHEMA,
          key,
          Schema.BYTES_SCHEMA,
          messageBytes,
          timestamp,
          headers);
}
 
Example #12
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that when a call to ackMessages() fails, that the message is not redelivered to Kafka if
 * the message is received again by Cloud Pub/Sub. Also tests that ack ids are added properly if
 * the ack id has not been seen before.
 */
@Test
public void testPollWithDuplicateReceivedMessages() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm1).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  assertEquals(1, result.size());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  stubbedPullResponse =
      PullResponse.newBuilder().addReceivedMessages(0, rm1).addReceivedMessages(1, rm2).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  result = task.poll();
  assertEquals(1, result.size());
}
 
Example #13
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches {@link
 * #KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE} and {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageTimestampAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE, KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
          new SourceRecord(
                  null,
                  null,
                  KAFKA_TOPIC,
                  0,
                  Schema.OPTIONAL_STRING_SCHEMA,
                  KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
                  Schema.BYTES_SCHEMA,
                  KAFKA_VALUE, Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE));
  assertRecordsEqual(expected, result.get(0));
}
 
Example #14
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests that the correct partition is assigned when the partition scheme is "hash_value". */
@Test
public void testPollWithPartitionSchemeHashValue() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_VALUE.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #15
Source File: SourceRecordBuilder.java    From kafka-connect-couchbase with Apache License 2.0
@Stability.Internal
public SourceRecord build(Map<String, ?> sourcePartition,
                          Map<String, ?> sourceOffset,
                          String defaultTopic) {
  return new SourceRecord(
      sourcePartition,
      sourceOffset,
      defaultIfNull(topic, defaultTopic),
      kafkaPartition,
      keySchema,
      key,
      valueSchema,
      value,
      timestamp,
      headers);
}
 
Example #16
Source File: SourceRecordConcurrentLinkedDequeTest.java    From connect-utils with Apache License 2.0
@Test
public void drain() throws InterruptedException {
  List<SourceRecord> records = new ArrayList<>(256);
  assertFalse(this.sourceRecords.drain(records), "drain should return false");
  assertTrue(records.isEmpty(), "records should be empty");

  final int EXPECTED_COUNT = 5;
  for (int i = 0; i < EXPECTED_COUNT; i++) {
    SourceRecord record = new SourceRecord(null, null, null, null, null);
    this.sourceRecords.add(record);
  }

  assertEquals(EXPECTED_COUNT, this.sourceRecords.size(), "sourceRecords.size() should match.");
  assertTrue(this.sourceRecords.drain(records), "drain should return true");
  assertTrue(this.sourceRecords.isEmpty(), "drain should have emptied the deque.");
  assertEquals(EXPECTED_COUNT, records.size(), "records.size()");
}
 
Example #17
Source File: GitHubSourceTask.java    From kafka-connect-github-source with MIT License
@Override
public List<SourceRecord> poll() throws InterruptedException {
    gitHubHttpAPIClient.sleepIfNeed();

    // fetch data
    final ArrayList<SourceRecord> records = new ArrayList<>();
    JSONArray issues = gitHubHttpAPIClient.getNextIssues(nextPageToVisit, nextQuerySince);
    // we'll count how many results we get with i
    int i = 0;
    for (Object obj : issues) {
        Issue issue = Issue.fromJson((JSONObject) obj);
        SourceRecord sourceRecord = generateSourceRecord(issue);
        records.add(sourceRecord);
        i += 1;
        lastUpdatedAt = issue.getUpdatedAt();
    }
    if (i > 0) log.info(String.format("Fetched %s record(s)", i));
    if (i == 100){
        // we have reached a full batch, we need to get the next one
        nextPageToVisit += 1;
    }
    else {
        nextQuerySince = lastUpdatedAt.plusSeconds(1);
        nextPageToVisit = 1;
        gitHubHttpAPIClient.sleep();
    }
    return records;
}
 
Example #18
Source File: GitHubSourceTask.java    From kafka-connect-github-source with MIT License
private SourceRecord generateSourceRecord(Issue issue) {
    return new SourceRecord(
            sourcePartition(),
            sourceOffset(issue.getUpdatedAt()),
            config.getTopic(),
            null, // partition will be inferred by the framework
            KEY_SCHEMA,
            buildRecordKey(issue),
            VALUE_SCHEMA,
            buildRecordValue(issue),
            issue.getUpdatedAt().toEpochMilli());
}
 
Example #19
Source File: MirusSourceTaskTest.java    From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void testSimplePollReturnsExpectedRecords() {
  mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, new byte[] {}, new byte[] {}));
  mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, new byte[] {}, new byte[] {}));
  List<SourceRecord> result = mirusSourceTask.poll();
  assertThat(result.size(), is(2));

  SourceRecord sourceRecord = result.get(0);
  assertThat(sourceRecord.headers().size(), is(0));
  assertThat(sourceRecord.kafkaPartition(), is(nullValue())); // Since partition matching is off
  assertThat(sourceRecord.keySchema().type(), is(ConnectSchema.BYTES_SCHEMA.type()));
  assertThat(sourceRecord.valueSchema().type(), is(ConnectSchema.BYTES_SCHEMA.type()));
  assertThat(sourceRecord.timestamp(), is(-1L)); // Since the source record has no timestamp
}
 
Example #20
Source File: FsSourceTaskTest.java    From kafka-connect-fs with Apache License 2.0
protected void checkRecords(List<SourceRecord> records) {
    records.forEach(record -> {
        assertEquals("topic_test", record.topic());
        assertNotNull(record.sourcePartition());
        assertNotNull(record.sourceOffset());
        assertNotNull(record.value());

        assertNotNull(((Struct) record.value()).get(TextFileReader.FIELD_NAME_VALUE_DEFAULT));
    });
}
 
Example #21
Source File: MirusSourceTaskTest.java    From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void testJsonConverterRecord() {
  Map<String, String> properties = mockTaskProperties();
  properties.put(
      SourceConfigDefinition.SOURCE_KEY_CONVERTER.getKey(),
      "org.apache.kafka.connect.json.JsonConverter");
  properties.put(
      SourceConfigDefinition.SOURCE_VALUE_CONVERTER.getKey(),
      "org.apache.kafka.connect.json.JsonConverter");

  mirusSourceTask.start(properties);
  mockConsumer.addRecord(
      new ConsumerRecord<>(
          TOPIC,
          0,
          0,
          "{\"schema\": {\"type\": \"struct\",\"fields\": [{\"type\": \"string\",\"optional\": true,\"field\": \"id\"}],\"optional\": false},\"payload\": {\"id\": \"hiThereMirusKey\"}}"
              .getBytes(StandardCharsets.UTF_8),
          "{\"schema\": {\"type\": \"struct\",\"fields\": [{\"type\": \"string\",\"optional\": true,\"field\": \"id\"}],\"optional\": false},\"payload\": {\"id\": \"hiThereMirusValue\"}}"
              .getBytes(StandardCharsets.UTF_8)));

  List<SourceRecord> result = mirusSourceTask.poll();
  assertThat(result.size(), is(1));

  SourceRecord sourceRecord = result.get(0);
  assertThat(sourceRecord.headers().size(), is(0));
  assertThat(sourceRecord.kafkaPartition(), is(nullValue())); // Since partition matching is off
  assertThat(sourceRecord.keySchema().type(), is(Schema.Type.STRUCT));
  assertThat(sourceRecord.valueSchema().type(), is(Schema.Type.STRUCT));
  assertThat(sourceRecord.timestamp(), is(-1L)); // Since the source record has no timestamp
}
 
Example #22
Source File: CouchbaseSourceTask.java    From kafka-connect-couchbase with Apache License 2.0
private SourceRecord convertToSourceRecord(DocumentEvent docEvent) {
  String defaultTopic = getDefaultTopic(docEvent);

  SourceRecordBuilder builder = sourceHandler.handle(new SourceHandlerParams(docEvent, defaultTopic));
  if (builder == null) {
    return null;
  }
  return builder.build(
      sourcePartition(docEvent.partition()),
      sourceOffset(docEvent),
      defaultTopic);
}
 
Example #23
Source File: KafkaConnectSource.java    From pulsar with Apache License 2.0
KafkaSourceRecord(SourceRecord srcRecord) {
    super(srcRecord);
    AvroData avroData = new AvroData(1000);
    byte[] keyBytes = keyConverter.fromConnectData(
            srcRecord.topic(), srcRecord.keySchema(), srcRecord.key());
    this.key = keyBytes != null ? Optional.of(Base64.getEncoder().encodeToString(keyBytes)) : Optional.empty();

    byte[] valueBytes = valueConverter.fromConnectData(
            srcRecord.topic(), srcRecord.valueSchema(), srcRecord.value());

    this.value = new KeyValue<>(keyBytes, valueBytes);

    this.topicName = Optional.of(srcRecord.topic());

    if (srcRecord.keySchema() != null) {
        keySchema = readerCache.getIfPresent(srcRecord.keySchema());
    }
    if (srcRecord.valueSchema() != null) {
        valueSchema = readerCache.getIfPresent(srcRecord.valueSchema());
    }

    if (srcRecord.keySchema() != null && keySchema == null) {
        keySchema = new KafkaSchemaWrappedSchema(
                avroData.fromConnectSchema(srcRecord.keySchema()), keyConverter);
        readerCache.put(srcRecord.keySchema(), keySchema);
    }

    if (srcRecord.valueSchema() != null && valueSchema == null) {
        valueSchema = new KafkaSchemaWrappedSchema(
                avroData.fromConnectSchema(srcRecord.valueSchema()), valueConverter);
        readerCache.put(srcRecord.valueSchema(), valueSchema);
    }

    this.eventTime = Optional.ofNullable(srcRecord.timestamp());
    this.partitionId = Optional.of(srcRecord.sourcePartition()
        .entrySet()
        .stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .collect(Collectors.joining(",")));
}
 
Example #24
Source File: CamelSourceTask.java    From camel-kafka-connector with Apache License 2.0
private void setAdditionalHeaders(SourceRecord record, Map<String, Object> map, String prefix) {
    // Map each supported Camel header value type to the corresponding Connect header type.
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        String keyCamelHeader = prefix + key;

        if (value instanceof String) {
            record.headers().addString(keyCamelHeader, (String)value);
        } else if (value instanceof Boolean) {
            record.headers().addBoolean(keyCamelHeader, (boolean)value);
        } else if (value instanceof Byte) {
            record.headers().addByte(keyCamelHeader, (byte)value);
        } else if (value instanceof byte[]) {
            record.headers().addBytes(keyCamelHeader, (byte[])value);
        } else if (value instanceof Time) {
            record.headers().addTime(keyCamelHeader, (Time)value);
        } else if (value instanceof Timestamp) {
            record.headers().addTimestamp(keyCamelHeader, (Timestamp)value);
        } else if (value instanceof Date) {
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
            String convertedDate = sdf.format(value);
            record.headers().addString(keyCamelHeader, convertedDate);
        } else if (value instanceof BigDecimal) {
            record.headers().addDecimal(keyCamelHeader, (BigDecimal)value);
        } else if (value instanceof Double) {
            record.headers().addDouble(keyCamelHeader, (double)value);
        } else if (value instanceof Float) {
            record.headers().addFloat(keyCamelHeader, (float)value);
        } else if (value instanceof Integer) {
            record.headers().addInt(keyCamelHeader, (int)value);
        } else if (value instanceof Long) {
            record.headers().addLong(keyCamelHeader, (long)value);
        } else if (value instanceof Short) {
            record.headers().addShort(keyCamelHeader, (short)value);
        }
    }
}
 
Example #25
Source File: OracleConnectorIT.java    From debezium-incubator with Apache License 2.0
@Test
@FixFor("DBZ-800")
public void shouldReceiveHeartbeatAlsoWhenChangingNonWhitelistedTable() throws Exception {
    TestHelper.dropTable(connection, "debezium.dbz800a");
    TestHelper.dropTable(connection, "debezium.dbz800b");

    // the low heartbeat interval should make sure that a heartbeat message is emitted after each change record
    // received from the database
    Configuration config = TestHelper.defaultConfig()
            .with(Heartbeat.HEARTBEAT_INTERVAL, "1")
            .with(OracleConnectorConfig.TABLE_WHITELIST, "DEBEZIUM\\.DBZ800B")
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    connection.execute("CREATE TABLE debezium.dbz800a (id NUMBER(9) NOT NULL, aaa VARCHAR2(100), PRIMARY KEY (id) )");
    connection.execute("CREATE TABLE debezium.dbz800b (id NUMBER(9) NOT NULL, bbb VARCHAR2(100), PRIMARY KEY (id) )");
    connection.execute("INSERT INTO debezium.dbz800a VALUES (1, 'AAA')");
    connection.execute("INSERT INTO debezium.dbz800b VALUES (2, 'BBB')");
    connection.execute("COMMIT");

    // expecting two heartbeat records and one actual change record
    List<SourceRecord> records = consumeRecordsByTopic(3).allRecordsInOrder();

    // expecting no change record for s1.a but a heartbeat
    verifyHeartbeatRecord(records.get(0));

    // and then a change record for s1.b and a heartbeat
    verifyHeartbeatRecord(records.get(1));
    VerifyRecord.isValidInsert(records.get(2), "ID", 2);
}
 
Example #26
Source File: OracleConnectorIT.java    From debezium-incubator with Apache License 2.0
@Test
public void shouldReadChangeStreamForTableCreatedWhileStreaming() throws Exception {
    TestHelper.dropTable(connection, "debezium.customer2");

    Configuration config = TestHelper.defaultConfig()
            .with(RelationalDatabaseConnectorConfig.TABLE_WHITELIST, "DEBEZIUM\\.CUSTOMER2")
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    String ddl = "create table debezium.customer2 (" +
            "  id numeric(9,0) not null, " +
            "  name varchar2(1000), " +
            "  score decimal(6, 2), " +
            "  registered timestamp, " +
            "  primary key (id)" +
            ")";

    connection.execute(ddl);
    connection.execute("GRANT SELECT ON debezium.customer2 to " + TestHelper.CONNECTOR_USER);

    connection.execute("INSERT INTO debezium.customer2 VALUES (2, 'Billie-Bob', 1234.56, TO_DATE('2018/02/22', 'yyyy-mm-dd'))");
    connection.execute("COMMIT");

    SourceRecords records = consumeRecordsByTopic(1);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.CUSTOMER2");
    assertThat(testTableRecords).hasSize(1);

    VerifyRecord.isValidInsert(testTableRecords.get(0), "ID", 2);
    Struct after = (Struct) ((Struct) testTableRecords.get(0).value()).get("after");
    assertThat(after.get("ID")).isEqualTo(2);
    assertThat(after.get("NAME")).isEqualTo("Billie-Bob");
    assertThat(after.get("SCORE")).isEqualTo(BigDecimal.valueOf(1234.56));
    assertThat(after.get("REGISTERED")).isEqualTo(toMicroSecondsSinceEpoch(LocalDateTime.of(2018, 2, 22, 0, 0, 0)));
}
 
Example #27
Source File: TransactionMetadataIT.java    From debezium-incubator with Apache License 2.0
@Test
public void transactionMetadata() throws Exception {
    Configuration config = TestHelper.defaultConfig()
            .with(RelationalDatabaseConnectorConfig.TABLE_WHITELIST, "DEBEZIUM\\.CUSTOMER")
            .with(OracleConnectorConfig.SNAPSHOT_MODE, SnapshotMode.SCHEMA_ONLY)
            .with(OracleConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
            .build();

    start(OracleConnector.class, config);
    assertConnectorIsRunning();

    waitForSnapshotToBeCompleted(TestHelper.CONNECTOR_NAME, TestHelper.SERVER_NAME);

    // Testing.Print.enable();
    connection.execute("INSERT INTO debezium.customer VALUES (1, 'Billie-Bob', 1234.56, TO_DATE('2018/02/22', 'yyyy-mm-dd'))");
    connection.execute("COMMIT");

    // TX BEGIN, insert, TX END
    final int expectedRecordCount = 1 + 1 + 1;
    List<SourceRecord> records = consumeRecordsByTopic(expectedRecordCount).allRecordsInOrder();
    assertThat(records).hasSize(expectedRecordCount);

    final String expectedTxId = assertBeginTransaction(records.get(0));

    // insert
    VerifyRecord.isValidInsert(records.get(1), "ID", 1);
    Struct after = (Struct) ((Struct) records.get(1).value()).get("after");
    assertThat(after.get("ID")).isEqualTo(1);
    assertThat(after.get("NAME")).isEqualTo("Billie-Bob");
    assertThat(after.get("SCORE")).isEqualTo(BigDecimal.valueOf(1234.56));
    assertRecordTransactionMetadata(records.get(1), expectedTxId, 1, 1);

    assertEndTransaction(records.get(2), expectedTxId, 1, Collect.hashMapOf("ORCLPDB1.DEBEZIUM.CUSTOMER", 1));
}
 
Example #28
Source File: ZeebeSourceTask.java    From kafka-connect-zeebe with Apache License 2.0 5 votes vote down vote up
@SuppressWarnings("squid:S1168")
@Override
public List<SourceRecord> poll() {
  if (!inflightRegistry.hasCapacity()) {
    LOGGER.trace("No capacity left to poll new jobs, returning control to caller after backoff");
    backoff.backoff();
    return null;
  }

  final List<SourceRecord> records =
      inflightRegistry
          .jobTypesWithCapacity()
          .flatMap(this::fetchJobs)
          .map(inflightRegistry::registerJob)
          .map(this::transformJob)
          .collect(Collectors.toList());

  // poll interface specifies to return null instead of empty
  if (records.isEmpty()) {
    LOGGER.trace("Nothing to publish, returning control to caller after backoff");
    backoff.backoff();
    return null;
  }

  LOGGER.debug("Publishing {} source records", records.size());
  return records;
}
 
Example #29
Source File: AbstractOracleDatatypesTest.java    From debezium-incubator with Apache License 2.0
@Test
public void stringTypes() throws Exception {
    int expectedRecordCount = 0;

    if (insertRecordsDuringTest()) {
        insertStringTypes();
    }

    Testing.debug("Inserted");
    expectedRecordCount++;

    final SourceRecords records = consumeRecordsByTopic(expectedRecordCount);

    List<SourceRecord> testTableRecords = records.recordsForTopic("server1.DEBEZIUM.TYPE_STRING");
    assertThat(testTableRecords).hasSize(expectedRecordCount);
    SourceRecord record = testTableRecords.get(0);

    VerifyRecord.isValid(record);

    // insert
    if (insertRecordsDuringTest()) {
        VerifyRecord.isValidInsert(record, "ID", 1);
    }
    else {
        VerifyRecord.isValidRead(record, "ID", 1);
    }

    Struct after = (Struct) ((Struct) record.value()).get("after");
    assertRecord(after, EXPECTED_STRING);
}
 
Example #30
Source File: SourceRecordDequeTest.java    From connect-utils with Apache License 2.0
@Test
public void empty() {
  SourceRecordDeque deque = this.builder.build();
  List<SourceRecord> records = deque.newList();
  assertFalse(deque.drain(records, 100), "deque is empty. False should have been returned.");
  verify(this.time, atLeastOnce()).sleep(100);
}