org.apache.kafka.connect.source.SourceRecord Java Examples
The following examples show how to use
org.apache.kafka.connect.source.SourceRecord.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
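Before the project-specific snippets, here is a minimal, self-contained sketch of the typical usage: wrapping one value read from an external system into a SourceRecord, with a source partition map identifying the input and a source offset map recording how far it has been read. The "file"/"line" keys and the helper class name are purely illustrative and not taken from any of the projects below; only the SourceRecord constructor and Schema come from the Connect API.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class ExampleSourceRecordFactory {

    // Illustrative only: "file" and "line" are made-up partition/offset keys for a
    // hypothetical line-oriented source; real connectors choose their own keys.
    public static SourceRecord recordForLine(String fileName, long lineNumber, String line, String topic) {
        Map<String, ?> sourcePartition = Collections.singletonMap("file", fileName);
        Map<String, ?> sourceOffset = Collections.singletonMap("line", lineNumber);
        // Constructor overload without an explicit key, Kafka partition, timestamp or headers;
        // the Connect framework and partitioner fill those in.
        return new SourceRecord(sourcePartition, sourceOffset, topic, Schema.STRING_SCHEMA, line);
    }
}

The examples that follow are variations on this pattern, differing mainly in how the source partition, offset, schemas, headers, and timestamp are populated.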
Example #1
Source File: CamelTypeConverterTransformTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testIfItConvertsConnectRecordCorrectly() {
    final SourceRecord connectRecord = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "topic",
        Schema.STRING_SCHEMA, "1234", Schema.STRING_SCHEMA, "TRUE");

    final Map<String, Object> propsForKeySmt = new HashMap<>();
    propsForKeySmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, Integer.class.getName());

    final Map<String, Object> propsForValueSmt = new HashMap<>();
    propsForValueSmt.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, "java.lang.Boolean");

    final Transformation<SourceRecord> transformationKey = new CamelTypeConverterTransform.Key<>();
    final Transformation<SourceRecord> transformationValue = new CamelTypeConverterTransform.Value<>();

    transformationKey.configure(propsForKeySmt);
    transformationValue.configure(propsForValueSmt);

    final SourceRecord transformedKeySourceRecord = transformationKey.apply(connectRecord);
    final SourceRecord transformedValueSourceRecord = transformationValue.apply(connectRecord);

    assertEquals(1234, transformedKeySourceRecord.key());
    assertEquals(Schema.INT32_SCHEMA, transformedKeySourceRecord.keySchema());

    assertEquals(true, transformedValueSourceRecord.value());
    assertEquals(Schema.BOOLEAN_SCHEMA, transformedValueSourceRecord.valueSchema());
}
Example #2
Source File: CamelSourceTaskTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePolling() {
    final long size = 2;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size);
    List<SourceRecord> poll = sourceTask.poll();

    assertEquals(size, poll.size());
    assertEquals(TOPIC_NAME, poll.get(0).topic());
    assertEquals(LoggingLevel.OFF.toString(), sourceTask.getCamelSourceConnectorConfig(props)
        .getString(CamelSourceConnectorConfig.CAMEL_SOURCE_CONTENT_LOG_LEVEL_CONF));

    sourceTask.stop();
}
Example #3
Source File: CamelSourceTaskTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePollingMaxBatchPollSize() {
    final long size = 2;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_MAX_BATCH_POLL_SIZE_CONF, String.valueOf(size));

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size + 1);
    List<SourceRecord> poll = sourceTask.poll();
    int pollSize = poll.size();

    assertTrue(pollSize >= 0 && pollSize <= size, "Batch size: " + pollSize + ", expected between 0 and " + size);

    sourceTask.stop();
}
Example #4
Source File: CamelSourceTaskTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testSourcePollingTimeout() {
    final long size = 999;
    Map<String, String> props = new HashMap<>();
    props.put(CamelSourceConnectorConfig.TOPIC_CONF, TOPIC_NAME);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_URL_CONF, DIRECT_URI);
    props.put(CamelSourceConnectorConfig.CAMEL_SOURCE_MAX_POLL_DURATION_CONF, "2");

    CamelSourceTask sourceTask = new CamelSourceTask();
    sourceTask.start(props);

    sendBatchOfRecords(sourceTask, size);
    List<SourceRecord> poll = sourceTask.poll();
    int pollSize = poll.size();

    assertTrue(pollSize < size, "Batch size: " + pollSize + ", expected strictly less than " + size);

    sourceTask.stop();
}
Example #5
Source File: AbstractKafkaConnectSource.java From hazelcast-jet-contrib with Apache License 2.0
public void fillBuffer(SourceBuilder.TimestampedSourceBuffer<T> buf) {
    if (!taskInit) {
        task.initialize(new JetSourceTaskContext());
        task.start(taskConfig);
        taskInit = true;
    }
    try {
        List<SourceRecord> records = task.poll();
        if (records == null) {
            return;
        }
        for (SourceRecord record : records) {
            boolean added = addToBuffer(record, buf);
            if (added) {
                partitionsToOffset.put(record.sourcePartition(), record.sourceOffset());
            }
        }
    } catch (InterruptedException e) {
        throw rethrow(e);
    }
}
Example #6
Source File: SourceRecordBuilder.java From kafka-connect-couchbase with Apache License 2.0
@Stability.Internal
public SourceRecord build(Map<String, ?> sourcePartition, Map<String, ?> sourceOffset, String defaultTopic) {
    return new SourceRecord(
        sourcePartition,
        sourceOffset,
        defaultIfNull(topic, defaultTopic),
        kafkaPartition,
        keySchema,
        key,
        valueSchema,
        value,
        timestamp,
        headers);
}
Example #7
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches
 * {@link #KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE} and {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageTimestampAttribute() throws Exception {
    task.start(props);
    Map<String, String> attributes = new HashMap<>();
    attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
    attributes.put(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE, KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE);
    ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
    PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
    when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);

    List<SourceRecord> result = task.poll();
    verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
    assertEquals(1, result.size());
    SourceRecord expected =
        new SourceRecord(
            null,
            null,
            KAFKA_TOPIC,
            0,
            Schema.OPTIONAL_STRING_SCHEMA,
            KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
            Schema.BYTES_SCHEMA,
            KAFKA_VALUE,
            Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE));
    assertRecordsEqual(expected, result.get(0));
}
Example #8
Source File: CloudPubSubSourceTask.java From pubsub with Apache License 2.0
private SourceRecord createRecordWithHeaders(Map<String, String> messageAttributes, Map<String, String> ack,
    String key, byte[] messageBytes, Long timestamp) {
    ConnectHeaders headers = new ConnectHeaders();
    for (Entry<String, String> attribute : messageAttributes.entrySet()) {
        if (!attribute.getKey().equals(kafkaMessageKeyAttribute)) {
            headers.addString(attribute.getKey(), attribute.getValue());
        }
    }

    return new SourceRecord(
        null,
        ack,
        kafkaTopic,
        selectPartition(key, messageBytes),
        Schema.OPTIONAL_STRING_SCHEMA,
        key,
        Schema.BYTES_SCHEMA,
        messageBytes,
        timestamp,
        headers);
}
Example #9
Source File: KafkaMonitor.java From mirus with BSD 3-Clause "New" or "Revised" License
private List<Transformation<SourceRecord>> validateTransformations(
    List<Transformation<SourceRecord>> transformations) {
    List<Transformation<SourceRecord>> regexRouters = new ArrayList<>();

    // No need to validate transforms if we're not checking destination partitions
    if (this.topicCheckingEnabled) {
        for (Transformation<SourceRecord> transform : transformations) {
            String transformName = transform.getClass().getSimpleName();
            if (transform instanceof RegexRouter) {
                regexRouters.add(transform);
                // Slightly awkward check to see if any other routing transforms are configured
            } else if (transformName.contains("Router")) {
                throw new IllegalArgumentException(
                    String.format(
                        "Unsupported Router Transformation %s found."
                            + " To use it, please disable destination topic checking by setting 'enable.destination.topic.checking' to false.",
                        transformName));
            } else {
                logger.debug("Ignoring non-routing Transformation {}", transformName);
            }
        }
    }
    return regexRouters;
}
Example #10
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/**
 * Tests that when a call to ackMessages() fails, the message is not redelivered to Kafka if
 * the message is received again by Cloud Pub/Sub. Also tests that ack ids are added properly if
 * the ack id has not been seen before.
 */
@Test
public void testPollWithDuplicateReceivedMessages() throws Exception {
    task.start(props);
    ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
    PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm1).build();
    when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
    List<SourceRecord> result = task.poll();
    assertEquals(1, result.size());

    ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
    stubbedPullResponse =
        PullResponse.newBuilder().addReceivedMessages(0, rm1).addReceivedMessages(1, rm2).build();
    when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
    result = task.poll();
    assertEquals(1, result.size());
}
Example #11
Source File: CloudPubSubSourceTaskTest.java From pubsub with Apache License 2.0
/** Tests that the correct partition is assigned when the partition scheme is "hash_value". */
@Test
public void testPollWithPartitionSchemeHashValue() throws Exception {
    props.put(
        CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
        CloudPubSubSourceConnector.PartitionScheme.HASH_VALUE.toString());
    task.start(props);
    ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
    PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
    when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);

    List<SourceRecord> result = task.poll();
    verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
    assertEquals(1, result.size());
    SourceRecord expected =
        new SourceRecord(
            null,
            null,
            KAFKA_TOPIC,
            KAFKA_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
            Schema.OPTIONAL_STRING_SCHEMA,
            null,
            Schema.BYTES_SCHEMA,
            KAFKA_VALUE);
    assertRecordsEqual(expected, result.get(0));
}
Example #12
Source File: MongodbSourceTask.java From kafka-connect-mongodb with Apache License 2.0
/**
 * Poll this MongodbSourceTask for new records.
 *
 * @return a list of source records
 * @throws InterruptException
 */
@Override
public List<SourceRecord> poll() throws InterruptException {
    List<SourceRecord> records = new ArrayList<>();
    while (!reader.isEmpty()) {
        Document message = reader.pool();
        Struct messageStruct = getStruct(message);
        String topic = getTopic(message);
        String db = getDB(message);
        String timestamp = getTimestamp(message);
        records.add(new SourceRecord(Collections.singletonMap("mongodb", db),
            Collections.singletonMap(db, timestamp), topic, messageStruct.schema(), messageStruct));
        log.trace(message.toString());
    }
    return records;
}
Example #13
Source File: SourceRecordConcurrentLinkedDequeTest.java From connect-utils with Apache License 2.0
@Test
public void drain() throws InterruptedException {
    List<SourceRecord> records = new ArrayList<>(256);
    assertFalse(this.sourceRecords.drain(records), "drain should return false");
    assertTrue(records.isEmpty(), "records should be empty");

    final int EXPECTED_COUNT = 5;
    for (int i = 0; i < EXPECTED_COUNT; i++) {
        SourceRecord record = new SourceRecord(null, null, null, null, null);
        this.sourceRecords.add(record);
    }

    assertEquals(EXPECTED_COUNT, this.sourceRecords.size(), "sourceRecords.size() should match.");
    assertTrue(this.sourceRecords.drain(records), "drain should return true");
    assertTrue(this.sourceRecords.isEmpty(), "drain should have emptied the deque.");
    assertEquals(EXPECTED_COUNT, records.size(), "records.size()");
}
Example #14
Source File: IgniteSourceTask.java From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    ArrayList<SourceRecord> records = new ArrayList<>(evtBatchSize);
    ArrayList<CacheEvent> evts = new ArrayList<>(evtBatchSize);

    if (stopped)
        return records;

    try {
        if (evtBuf.drainTo(evts, evtBatchSize) > 0) {
            for (CacheEvent evt : evts) {
                // schema and keys are ignored.
                for (String topic : topics)
                    records.add(new SourceRecord(srcPartition, offset, topic, null, evt));
            }

            return records;
        }
    }
    catch (IgniteException e) {
        log.error("Error when polling event queue!", e);
    }

    // for shutdown.
    return null;
}
Example #15
Source File: SpoolDirLineDelimitedSourceTask.java From kafka-connect-spooldir with Apache License 2.0
@Override
protected List<SourceRecord> process() throws IOException {
    int recordCount = 0;
    List<SourceRecord> records = new ArrayList<>(this.config.batchSize);
    String line = null;
    while (recordCount < this.config.batchSize && null != (line = this.reader.readLine())) {
        SourceRecord record = record(
            null,
            new SchemaAndValue(Schema.STRING_SCHEMA, line),
            null
        );
        records.add(record);
        recordCount++;
    }
    return records;
}
Example #16
Source File: ZeebeSourceTask.java From kafka-connect-zeebe with Apache License 2.0
private SourceRecord transformJob(final ActivatedJob job) {
    final String topic = topicExtractor.extract(job);
    final Map<String, Integer> sourcePartition =
        Collections.singletonMap("partitionId", decodePartitionId(job.getKey()));
    // a better sourceOffset would be the position but we don't have it here unfortunately
    // key is however a monotonically increasing value, so in a sense it can provide a good
    // approximation of an offset
    final Map<String, Long> sourceOffset = Collections.singletonMap("key", job.getKey());

    return new SourceRecord(
        sourcePartition,
        sourceOffset,
        topic,
        Schema.INT64_SCHEMA,
        job.getKey(),
        Schema.STRING_SCHEMA,
        job.toJson());
}
Example #17
Source File: CamelSourceTask.java From camel-kafka-connector with Apache License 2.0
private void setAdditionalHeaders(SourceRecord record, Map<String, Object> map, String prefix) {
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        String keyCamelHeader = prefix + key;

        if (value instanceof String) {
            record.headers().addString(keyCamelHeader, (String) value);
        } else if (value instanceof Boolean) {
            record.headers().addBoolean(keyCamelHeader, (boolean) value);
        } else if (value instanceof Byte) {
            record.headers().addByte(keyCamelHeader, (byte) value);
        } else if (value instanceof byte[]) {
            record.headers().addBytes(keyCamelHeader, (byte[]) value);
        } else if (value instanceof Time) {
            record.headers().addTime(keyCamelHeader, (Time) value);
        } else if (value instanceof Timestamp) {
            record.headers().addTimestamp(keyCamelHeader, (Timestamp) value);
        } else if (value instanceof Date) {
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
            String convertedDate = sdf.format(value);
            record.headers().addString(keyCamelHeader, convertedDate);
        } else if (value instanceof BigDecimal) {
            record.headers().addDecimal(keyCamelHeader, (BigDecimal) value);
        } else if (value instanceof Double) {
            record.headers().addDouble(keyCamelHeader, (double) value);
        } else if (value instanceof Float) {
            record.headers().addFloat(keyCamelHeader, (float) value);
        } else if (value instanceof Integer) {
            record.headers().addInt(keyCamelHeader, (int) value);
        } else if (value instanceof Long) {
            record.headers().addLong(keyCamelHeader, (long) value);
        } else if (value instanceof Short) {
            record.headers().addShort(keyCamelHeader, (short) value);
        }
    }
}
Example #18
Source File: TaskHelper.java From camel-kafka-connector with Apache License 2.0
public static <CFG extends AbstractConfig> void logRecordContent(Logger logger, ConnectRecord<?> record, CFG config) {
    if (logger != null && record != null && config != null) {
        // do not log record's content by default, as it may contain sensitive information
        LoggingLevel level = LoggingLevel.OFF;
        try {
            final String key = (record instanceof SourceRecord)
                ? CamelSourceConnectorConfig.CAMEL_SOURCE_CONTENT_LOG_LEVEL_CONF
                : CamelSinkConnectorConfig.CAMEL_SINK_CONTENT_LOG_LEVEL_CONF;
            level = LoggingLevel.valueOf(config.getString(key).toUpperCase());
        } catch (Exception e) {
            logger.warn("Invalid value for contentLogLevel property");
        }
        switch (level) {
            case TRACE:
                logger.trace(record.toString());
                break;
            case DEBUG:
                logger.debug(record.toString());
                break;
            case INFO:
                logger.info(record.toString());
                break;
            case WARN:
                logger.warn(record.toString());
                break;
            case ERROR:
                logger.error(record.toString());
                break;
            default:
                break;
        }
    }
}
Example #19
Source File: MongodbSourceUriTaskTest.java From kafka-connect-mongodb with Apache License 2.0
@Test
public void testInsertWithOffsets() {
    try {
        expectOffsetLookupReturnOffset(collections);
        replay();

        task.start(sourceProperties);

        MongoDatabase db = mongoClient.getDatabase("mydb");
        Integer numberOfDocuments = new Random().nextInt(new Random().nextInt(100000));
        for (int i = 0; i < numberOfDocuments; i++) {
            Document newDocument = new Document()
                .append(RandomStringUtils.random(new Random().nextInt(100), true, false), new Random().nextInt());
            db.getCollection(collections.get(new Random().nextInt(3))).insertOne(newDocument);
        }

        List<SourceRecord> records = new ArrayList<>();
        List<SourceRecord> pollRecords;
        do {
            pollRecords = task.poll();
            for (SourceRecord r : pollRecords) {
                records.add(r);
                offsets.putAll((Map<String, Long>) r.sourceOffset());
            }
        } while (!pollRecords.isEmpty());

        totalWrittenDocuments += records.size();
        Assert.assertEquals(totalWrittenDocuments, records.size());
    } catch (Exception e) {
        System.out.println("------------------------EXCEPTION-------------------------");
        e.printStackTrace();
        Assert.assertTrue(false);
        System.out.println("---------------------------END----------------------------");
    }
}
Example #20
Source File: CamelTypeConverterTransformTest.java From camel-kafka-connector with Apache License 2.0
@Test
public void testIfItCanHandleEmptyKeyProps() {
    final Transformation<SourceRecord> transformationKey = new CamelTypeConverterTransform.Key<>();

    final Map<String, Object> props = new HashMap<>();
    props.put(CamelTypeConverterTransform.FIELD_TARGET_TYPE_CONFIG, Map.class.getName());

    assertThrows(ConfigException.class, () -> transformationKey.configure(Collections.emptyMap()));
}
Example #21
Source File: IRCFeedTask.java From hello-kafka-streams with Apache License 2.0
@Override
public void onPrivmsg(String channel, IRCUser u, String msg) {
    IRCMessage event = new IRCMessage(channel, u, msg);
    //FIXME kafka round robin default partitioner seems to always publish to partition 0 only (?)
    long ts = event.getInt64("timestamp");
    Map<String, ?> srcOffset = Collections.singletonMap(TIMESTAMP_FIELD, ts);
    Map<String, ?> srcPartition = Collections.singletonMap(CHANNEL_FIELD, channel);
    SourceRecord record = new SourceRecord(srcPartition, srcOffset, topic, KEY_SCHEMA, ts, IRCMessage.SCHEMA, event);
    queue.offer(record);
}
Example #22
Source File: CloudPubSubSourceTask.java From pubsub with Apache License 2.0
@Override
public void commitRecord(SourceRecord record) {
    String ackId = record.sourceOffset().get(cpsSubscription).toString();
    deliveredAckIds.add(ackId);
    ackIds.remove(ackId);
    log.trace("Committed {}", ackId);
}
Example #23
Source File: CouchbaseSourceTask.java From kafka-connect-couchbase with Apache License 2.0
@Override
public List<SourceRecord> poll() throws InterruptedException {
    // If a fatal error occurred in another thread, propagate it.
    checkErrorQueue();

    // Block until at least one item is available or until the
    // courtesy timeout expires, giving the framework a chance
    // to pause the connector.
    DocumentChange firstEvent = queue.poll(1, SECONDS);
    if (firstEvent == null) {
        LOGGER.debug("Poll returns 0 results");
        return null; // Looks weird, but caller expects it.
    }

    List<DocumentChange> events = new ArrayList<>();
    try {
        events.add(firstEvent);
        queue.drainTo(events, batchSizeMax - 1);

        List<SourceRecord> results = events.stream()
            .map(e -> DocumentEvent.create(e, bucket))
            .filter(e -> filter.pass(e))
            .map(this::convertToSourceRecord)
            .filter(Objects::nonNull)
            .collect(toList());

        int excluded = events.size() - results.size();
        LOGGER.info("Poll returns {} result(s) (filtered out {})", results.size(), excluded);
        return results;
    } finally {
        events.forEach(DocumentChange::flowControlAck);
    }
}
Example #24
Source File: MongodbSourceUriTaskTest.java From kafka-connect-mongodb with Apache License 2.0
@Test
public void testInsertWithNullOffsets() {
    try {
        expectOffsetLookupReturnNone();
        replay();

        task.start(sourceProperties);

        MongoDatabase db = mongoClient.getDatabase("mydb");
        Integer numberOfDocuments = new Random().nextInt(new Random().nextInt(100000));
        for (int i = 0; i < numberOfDocuments; i++) {
            Document newDocument = new Document()
                .append(RandomStringUtils.random(new Random().nextInt(100), true, false), new Random().nextInt());
            db.getCollection(collections.get(new Random().nextInt(3))).insertOne(newDocument);
        }

        List<SourceRecord> records = new ArrayList<>();
        List<SourceRecord> pollRecords;
        do {
            pollRecords = task.poll();
            for (SourceRecord r : pollRecords) {
                records.add(r);
                offsets.putAll((Map<String, Long>) r.sourceOffset());
            }
        } while (!pollRecords.isEmpty());

        totalWrittenDocuments += records.size();
        Assert.assertEquals(totalWrittenDocuments, records.size());
    } catch (Exception e) {
        System.out.println("------------------------EXCEPTION-------------------------");
        e.printStackTrace();
        Assert.assertTrue(false);
        System.out.println("---------------------------END----------------------------");
    }
}
Example #25
Source File: BackupSourceConnector.java From kafka-backup with Apache License 2.0
@Override
public void start(Map<String, String> props) {
    config = props;
    if (!config.getOrDefault(BackupSourceConfig.ALLOW_OLD_KAFKA_CONNECT_VERSION, "false").equals("true")) {
        try {
            SourceTask.class.getMethod("commitRecord", SourceRecord.class, RecordMetadata.class);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException("Kafka Backup requires at least Kafka Connect 2.4. Otherwise Offsets cannot be committed. If you are sure what you are doing, please set "
                + BackupSourceConfig.ALLOW_OLD_KAFKA_CONNECT_VERSION + " to true");
        }
    }
}
Example #26
Source File: SplunkHttpSourceTask.java From kafka-connect-splunk with Apache License 2.0
@Override
public List<SourceRecord> poll() throws InterruptedException {
    List<SourceRecord> records = new ArrayList<>(this.config.batchSize);

    while (!this.sourceRecordConcurrentLinkedDeque.drain(records)) {
        log.trace("No records received. Sleeping.");
    }

    return records;
}
Example #27
Source File: MirusSourceTaskTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void testSimplePollReturnsExpectedRecords() {
    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, new byte[] {}, new byte[] {}));
    mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, new byte[] {}, new byte[] {}));

    List<SourceRecord> result = mirusSourceTask.poll();
    assertThat(result.size(), is(2));

    SourceRecord sourceRecord = result.get(0);
    assertThat(sourceRecord.headers().size(), is(0));
    assertThat(sourceRecord.kafkaPartition(), is(nullValue())); // Since partition matching is off
    assertThat(sourceRecord.keySchema().type(), is(ConnectSchema.BYTES_SCHEMA.type()));
    assertThat(sourceRecord.valueSchema().type(), is(ConnectSchema.BYTES_SCHEMA.type()));
    assertThat(sourceRecord.timestamp(), is(-1L)); // Since the source record has no timestamp
}
Example #28
Source File: FsSourceTaskTest.java From kafka-connect-fs with Apache License 2.0
protected void checkRecords(List<SourceRecord> records) {
    records.forEach(record -> {
        assertEquals("topic_test", record.topic());
        assertNotNull(record.sourcePartition());
        assertNotNull(record.sourceOffset());
        assertNotNull(record.value());
        assertNotNull(((Struct) record.value()).get(TextFileReader.FIELD_NAME_VALUE_DEFAULT));
    });
}
Example #29
Source File: MirusSourceTaskTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void testJsonConverterRecord() {
    Map<String, String> properties = mockTaskProperties();
    properties.put(
        SourceConfigDefinition.SOURCE_KEY_CONVERTER.getKey(),
        "org.apache.kafka.connect.json.JsonConverter");
    properties.put(
        SourceConfigDefinition.SOURCE_VALUE_CONVERTER.getKey(),
        "org.apache.kafka.connect.json.JsonConverter");

    mirusSourceTask.start(properties);
    mockConsumer.addRecord(
        new ConsumerRecord<>(
            TOPIC,
            0,
            0,
            "{\"schema\": {\"type\": \"struct\",\"fields\": [{\"type\": \"string\",\"optional\": true,\"field\": \"id\"}],\"optional\": false},\"payload\": {\"id\": \"hiThereMirusKey\"}}"
                .getBytes(StandardCharsets.UTF_8),
            "{\"schema\": {\"type\": \"struct\",\"fields\": [{\"type\": \"string\",\"optional\": true,\"field\": \"id\"}],\"optional\": false},\"payload\": {\"id\": \"hiThereMirusValue\"}}"
                .getBytes(StandardCharsets.UTF_8)));

    List<SourceRecord> result = mirusSourceTask.poll();
    assertThat(result.size(), is(1));

    SourceRecord sourceRecord = result.get(0);
    assertThat(sourceRecord.headers().size(), is(0));
    assertThat(sourceRecord.kafkaPartition(), is(nullValue())); // Since partition matching is off
    assertThat(sourceRecord.keySchema().type(), is(Schema.Type.STRUCT));
    assertThat(sourceRecord.valueSchema().type(), is(Schema.Type.STRUCT));
    assertThat(sourceRecord.timestamp(), is(-1L)); // Since the source record has no timestamp
}
Example #30
Source File: DynamoDbSourceTask.java From kafka-connect-dynamodb with Apache License 2.0
@Override
public List<SourceRecord> poll() throws InterruptedException {
    // TODO rate limiting?

    if (assignedShards.isEmpty()) {
        throw new ConnectException("No remaining source shards");
    }

    final String shardId = assignedShards.get(currentShardIdx);

    final GetRecordsRequest req = new GetRecordsRequest();
    req.setShardIterator(shardIterator(shardId));
    req.setLimit(100); // TODO configurable

    final GetRecordsResult rsp = streamsClient.getRecords(req);
    if (rsp.getNextShardIterator() == null) {
        log.info("Shard ID `{}` for table `{}` has been closed, it will no longer be polled", shardId, config.tableForShard(shardId));
        shardIterators.remove(shardId);
        assignedShards.remove(shardId);
    } else {
        log.debug("Retrieved {} records from shard ID `{}`", rsp.getRecords().size(), shardId);
        shardIterators.put(shardId, rsp.getNextShardIterator());
    }

    currentShardIdx = (currentShardIdx + 1) % assignedShards.size();

    final String tableName = config.tableForShard(shardId);
    final String topic = config.topicFormat.replace("${table}", tableName);
    final Map<String, String> sourcePartition = sourcePartition(shardId);

    return rsp.getRecords().stream()
        .map(dynamoRecord -> toSourceRecord(sourcePartition, topic, dynamoRecord.getDynamodb()))
        .collect(Collectors.toList());
}