org.apache.kafka.connect.storage.OffsetStorageReader Java Examples
The following examples show how to use org.apache.kafka.connect.storage.OffsetStorageReader. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
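An OffsetStorageReader is handed to every SourceTask through its SourceTaskContext and exposes two lookup methods: offset(partition) for a single source partition and offsets(partitions) for several at once. The minimal sketch below is not taken from any of the projects listed here; the partition key "filename" and offset key "position" are hypothetical, since every connector defines its own partition and offset map layout. It shows the typical pattern of resuming from the last committed offset in start():

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.source.SourceTask;
import org.apache.kafka.connect.storage.OffsetStorageReader;

public abstract class ResumingSourceTask extends SourceTask {

    // "filename" and "position" are hypothetical keys chosen for this sketch.
    private long position = 0L;

    @Override
    public void start(Map<String, String> props) {
        // The framework populates the protected 'context' field via initialize().
        OffsetStorageReader reader = context.offsetStorageReader();
        Map<String, Object> offset =
            reader.offset(Collections.singletonMap("filename", props.get("filename")));
        if (offset != null && offset.get("position") != null) {
            // Resume where the previous task instance left off.
            position = (Long) offset.get("position");
        }
    }
}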
Example #1
Source File: MySqlSourceTaskTest.java, from kafka-mysql-connector (Apache License 2.0)
@Before
public void setup() throws IOException, SQLException {
    String mysqlHost = "10.100.172.86";
    connection = DriverManager.getConnection("jdbc:mysql://" + mysqlHost + ":3306/mysql", "root", "passwd");

    config = new HashMap<>();
    config.put(MySqlSourceConnector.USER_CONFIG, "maxwell");
    config.put(MySqlSourceConnector.PASSWORD_CONFIG, "XXXXXX");
    config.put(MySqlSourceConnector.PORT_CONFIG, "3306");
    config.put(MySqlSourceConnector.HOST_CONFIG, mysqlHost);

    task = new MySqlSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);

    runSql("drop table if exists test.users");
    runSql("drop database if exists test");
}
Example #2
Source File: KafkaSourceTaskTest.java, from MirrorTool-for-Kafka-Connect (Apache License 2.0)
@Before
public void setup() {
    opts = new HashMap<>();
    opts.put(KafkaSourceConnectorConfig.SOURCE_TOPIC_WHITELIST_CONFIG, SOURCE_TOPICS_WHITELIST_VALUE);
    opts.put(KafkaSourceConnectorConfig.MAX_SHUTDOWN_WAIT_MS_CONFIG, MAX_SHUTDOWN_WAIT_MS_VALUE);
    opts.put(KafkaSourceConnectorConfig.POLL_LOOP_TIMEOUT_MS_CONFIG, String.valueOf(POLL_LOOP_TIMEOUT_MS_VALUE));
    opts.put(KafkaSourceConnectorConfig.INCLUDE_MESSAGE_HEADERS_CONFIG, INCLUDE_MESSAGE_HEADERS_VALUE);
    opts.put(KafkaSourceConnectorConfig.CONSUMER_AUTO_OFFSET_RESET_CONFIG, CONSUMER_AUTO_OFFSET_RESET_VALUE);
    opts.put(KafkaSourceConnectorConfig.SOURCE_BOOTSTRAP_SERVERS_CONFIG, SOURCE_BOOTSTRAP_SERVERS_VALUE);
    opts.put(KafkaSourceConnectorConfig.TASK_LEADER_TOPIC_PARTITION_CONFIG, TASK_LEADER_TOPIC_PARTITION_VALUE);
    // Note: this second put for the same key overwrites the value set above.
    opts.put(KafkaSourceConnectorConfig.CONSUMER_AUTO_OFFSET_RESET_CONFIG, AUTO_OFFSET_RESET_VALUE);
    opts.put(KafkaSourceConnectorConfig.CONSUMER_GROUP_ID_CONFIG, CONSUMER_GROUP_ID_VALUE);

    config = new KafkaSourceConnectorConfig(opts);
    objectUnderTest = new KafkaSourceTask();
    offsetStorageReader = createMock(OffsetStorageReader.class);
    context = createMock(SourceTaskContext.class);
    consumer = createMock(KafkaConsumer.class);
    objectUnderTest.initialize(context);
}
Example #3
Source File: AbstractPolicy.java, from kafka-connect-fs (Apache License 2.0)
@Override
public FileReader offer(FileMetadata metadata, OffsetStorageReader offsetStorageReader) {
    FileSystem current = fileSystems.stream()
            .filter(fs -> metadata.getPath().startsWith(fs.getWorkingDirectory().toString()))
            .findFirst()
            .orElse(null);
    try {
        FileReader reader = ReflectionUtils.makeReader(
                (Class<? extends FileReader>) conf.getClass(FsSourceTaskConfig.FILE_READER_CLASS),
                current, new Path(metadata.getPath()), conf.originals());
        Map<String, Object> partition = Collections.singletonMap("path", metadata.getPath());
        Map<String, Object> offset = offsetStorageReader.offset(partition);
        if (offset != null && offset.get("offset") != null) {
            log.info("Seeking to offset [{}] for file [{}].", offset.get("offset"), metadata.getPath());
            reader.seek((Long) offset.get("offset"));
        }
        return reader;
    } catch (Exception e) {
        throw new ConnectException("An error has occurred when creating reader for file: " + metadata.getPath(), e);
    }
}
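Note the lookup pattern here: the source partition is a map identifying the file by path, and the stored offset map, when present, tells the freshly created reader where to seek so that processing resumes instead of starting over.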
Example #4
Source File: MirusSourceTaskTest.java, from mirus (BSD 3-Clause "New" or "Revised" License)
@Before
public void setUp() {
    mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    mockConsumer.updatePartitions(
        TOPIC,
        Arrays.asList(
            new PartitionInfo(TOPIC, 0, null, null, null),
            new PartitionInfo(TOPIC, 1, null, null, null)));
    mirusSourceTask = new MirusSourceTask(consumerProperties -> mockConsumer);

    // Always return offset = 0
    SourceTaskContext context =
        new SourceTaskContext() {
            @Override
            public Map<String, String> configs() {
                return null;
            }

            @Override
            public OffsetStorageReader offsetStorageReader() {
                return new OffsetStorageReader() {
                    @Override
                    public <T> Map<String, Object> offset(Map<String, T> partition) {
                        return new HashMap<>(MirusSourceTask.offsetMap(0L));
                    }

                    @Override
                    public <T> Map<Map<String, T>, Map<String, Object>> offsets(
                        Collection<Map<String, T>> partitions) {
                        return partitions.stream().collect(Collectors.toMap(p -> p, this::offset));
                    }
                };
            }
        };
    mirusSourceTask.initialize(context);
    mirusSourceTask.start(mockTaskProperties());
}
Example #5
Source File: FileStreamSourceTaskTest.java, from kafka-connector-skeleton (Apache License 2.0)
@Before
public void setup() throws IOException {
    tempFile = File.createTempFile("file-stream-source-task-test", null);
    config = new HashMap<>();
    config.put(FileStreamSourceConnector.FILE_CONFIG, tempFile.getAbsolutePath());
    config.put(FileStreamSourceConnector.TOPIC_CONFIG, TOPIC);
    task = new FileStreamSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);
}
Example #6
Source File: ReadYourWritesOffsetStorageAdapter.java, from kafka-connect-jenkins (Apache License 2.0)
private Map<Map<String, String>, Map<String, Object>> loadAndGetOffsets(OffsetStorageReader reader, String jobUrls) {
    String[] jobUrlArray = jobUrls.split(",");
    logger.debug("Total jobs: {}. Loading offsets from Connect.", jobUrlArray.length);
    Collection<Map<String, String>> partitions = new ArrayList<>(jobUrlArray.length);
    for (String jobUrl : jobUrlArray) {
        partitions.add(Collections.singletonMap(JenkinsSourceTask.JOB_NAME, urlDecode(extractJobName(jobUrl))));
    }
    return reader.offsets(partitions);
}
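Rather than calling offset(partition) once per job, this adapter builds the full list of source partitions and fetches all stored offsets in a single offsets(partitions) call.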
Example #7
Source File: RestTaskTest.java, from kafka-connect-rest (Apache License 2.0)
@Override
public OffsetStorageReader offsetStorageReader() {
    return new OffsetStorageReader() {
        @Override
        public <T> Map<String, Object> offset(Map<String, T> map) {
            return null;
        }

        @Override
        public <T> Map<Map<String, T>, Map<String, Object>> offsets(Collection<Map<String, T>> collection) {
            return null;
        }
    };
}
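A stub that returns null from both lookup methods is a common way to satisfy the SourceTaskContext contract in tests that model a first run, where no offsets have been committed yet.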
Example #8
Source File: FsSourceTaskTest.java, from kafka-connect-fs (Apache License 2.0)
@BeforeEach
public void initTask() {
    for (TaskFsTestConfig fsConfig : TEST_FILE_SYSTEMS) {
        Map<String, String> taskConfig = new HashMap<String, String>() {{
            String[] uris = fsConfig.getDirectories().stream().map(Path::toString)
                    .toArray(String[]::new);
            put(FsSourceTaskConfig.FS_URIS, String.join(",", uris));
            put(FsSourceTaskConfig.TOPIC, "topic_test");
            put(FsSourceTaskConfig.POLICY_CLASS, SimplePolicy.class.getName());
            put(FsSourceTaskConfig.FILE_READER_CLASS, TextFileReader.class.getName());
            put(FsSourceTaskConfig.POLICY_REGEXP, "^[0-9]*\\.txt$");
        }};

        // Mock initialization
        SourceTaskContext taskContext = PowerMock.createMock(SourceTaskContext.class);
        OffsetStorageReader offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);

        EasyMock.expect(taskContext.offsetStorageReader())
                .andReturn(offsetStorageReader);
        EasyMock.expect(taskContext.offsetStorageReader())
                .andReturn(offsetStorageReader);

        EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject()))
                .andReturn(new HashMap<String, Object>() {{
                    put("offset", (long) (NUM_RECORDS / 2));
                }});
        EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject()))
                .andReturn(new HashMap<String, Object>() {{
                    put("offset", (long) (NUM_RECORDS / 2));
                }});

        EasyMock.checkOrder(taskContext, false);
        EasyMock.replay(taskContext);

        EasyMock.checkOrder(offsetStorageReader, false);
        EasyMock.replay(offsetStorageReader);

        FsSourceTask task = new FsSourceTask();
        task.initialize(taskContext);

        fsConfig.setTaskConfig(taskConfig);
        fsConfig.setTask(task);
    }
}
Example #9
Source File: MsSqlTableMetadataProvider.java, from kafka-connect-cdc-mssql (Apache License 2.0)
public MsSqlTableMetadataProvider(MsSqlSourceConnectorConfig config, OffsetStorageReader offsetStorageReader) {
    super(config, offsetStorageReader);
}
Example #10
Source File: MsSqlSourceTask.java, from kafka-connect-cdc-mssql (Apache License 2.0)
@Override
protected Service service(ChangeWriter changeWriter, OffsetStorageReader offsetStorageReader) {
    this.changeWriter = changeWriter;
    this.tableMetadataProvider = new MsSqlTableMetadataProvider(this.config, offsetStorageReader);
    return new QueryService(this.time, this.tableMetadataProvider, this.config, this.changeWriter);
}
Example #11
Source File: MsSqlTableMetadataProviderTest.java, from kafka-connect-cdc-mssql (Apache License 2.0)
@BeforeEach
public void before(@MsSqlSettings Map<String, String> settings) {
    this.config = new MsSqlSourceConnectorConfig(settings);
    this.offsetStorageReader = mock(OffsetStorageReader.class);
    this.tableMetadataProvider = new MsSqlTableMetadataProvider(this.config, this.offsetStorageReader);
}
Example #12
Source File: AbstractSpoolDirSourceTaskTest.java, from kafka-connect-spooldir (Apache License 2.0)
protected void poll(final String packageName, TestCase testCase) throws InterruptedException, IOException {
    String keySchemaConfig = ObjectMapperFactory.INSTANCE.writeValueAsString(testCase.keySchema);
    String valueSchemaConfig = ObjectMapperFactory.INSTANCE.writeValueAsString(testCase.valueSchema);

    Map<String, String> settings = this.settings();
    settings.put(AbstractSourceConnectorConfig.INPUT_FILE_PATTERN_CONF, String.format("^.*\\.%s", packageName));
    settings.put(AbstractSpoolDirSourceConnectorConfig.KEY_SCHEMA_CONF, keySchemaConfig);
    settings.put(AbstractSpoolDirSourceConnectorConfig.VALUE_SCHEMA_CONF, valueSchemaConfig);
    if (null != testCase.settings && !testCase.settings.isEmpty()) {
        settings.putAll(testCase.settings);
    }

    this.task = createTask();
    SourceTaskContext sourceTaskContext = mock(SourceTaskContext.class);
    OffsetStorageReader offsetStorageReader = mock(OffsetStorageReader.class);
    when(offsetStorageReader.offset(anyMap())).thenReturn(testCase.offset);
    when(sourceTaskContext.offsetStorageReader()).thenReturn(offsetStorageReader);
    this.task.initialize(sourceTaskContext);
    this.task.start(settings);

    String dataFile = new File(packageName, Files.getNameWithoutExtension(testCase.path.toString())) + ".data";
    log.trace("poll(String, TestCase) - dataFile={}", dataFile);

    String inputFileName = String.format("%s.%s",
        Files.getNameWithoutExtension(testCase.path.toString()),
        packageName
    );

    final File inputFile = new File(this.inputPath, inputFileName);
    log.trace("poll(String, TestCase) - inputFile = {}", inputFile);
    final File processingFile = InputFileDequeue.processingFile(AbstractSourceConnectorConfig.PROCESSING_FILE_EXTENSION_DEFAULT, inputFile);

    try (InputStream inputStream = this.getClass().getResourceAsStream(dataFile)) {
        try (OutputStream outputStream = new FileOutputStream(inputFile)) {
            ByteStreams.copy(inputStream, outputStream);
        }
    }

    assertFalse(processingFile.exists(), String.format("processingFile %s should not exist before first poll().", processingFile));
    assertTrue(inputFile.exists(), String.format("inputFile %s should exist.", inputFile));
    List<SourceRecord> records = this.task.poll();
    assertTrue(inputFile.exists(), String.format("inputFile %s should exist after first poll().", inputFile));
    assertTrue(processingFile.exists(), String.format("processingFile %s should exist after first poll().", processingFile));
    assertNotNull(records, "records should not be null.");
    assertFalse(records.isEmpty(), "records should not be empty");
    assertEquals(testCase.expected.size(), records.size(), "records.size() does not match.");

    /*
    The following headers will change. Let's ensure they are there, but we don't care about
    their values since they are driven by things that will change such as lastModified dates
    and paths.
    */
    List<String> headersToRemove = Arrays.asList(
        Metadata.HEADER_LAST_MODIFIED,
        Metadata.HEADER_PATH,
        Metadata.HEADER_LENGTH
    );

    for (int i = 0; i < testCase.expected.size(); i++) {
        SourceRecord expectedRecord = testCase.expected.get(i);
        SourceRecord actualRecord = records.get(i);
        for (String headerToRemove : headersToRemove) {
            assertNotNull(
                actualRecord.headers().lastWithName(headerToRemove),
                String.format("index:%s should have the header '%s'", i, headerToRemove)
            );
            actualRecord.headers().remove(headerToRemove);
            expectedRecord.headers().remove(headerToRemove);
        }
        assertSourceRecord(expectedRecord, actualRecord, String.format("index:%s", i));
    }

    records = this.task.poll();
    assertNull(records, "records should be null after first poll.");
    records = this.task.poll();
    assertNull(records, "records should be null after first poll.");
    assertFalse(inputFile.exists(), String.format("inputFile %s should not exist.", inputFile));
    assertFalse(processingFile.exists(), String.format("processingFile %s should not exist.", processingFile));
    final File finishedFile = new File(this.finishedPath, inputFileName);
    assertTrue(finishedFile.exists(), String.format("finishedFile %s should exist.", finishedFile));
}
Example #13
Source File: SpoolDirCsvSourceTaskTest.java, from kafka-connect-spooldir (Apache License 2.0)
@Test
public void rebalance() throws IOException, InterruptedException {
    Schema schema = SchemaBuilder.struct()
        .field("id", Schema.INT32_SCHEMA)
        .build();
    final int count = 100;
    List<Struct> values = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        values.add(
            new Struct(schema)
                .put("id", i)
        );
    }
    File inputFile = new File(this.inputPath, "input.csv");
    writeCSV(inputFile, schema, values);

    Map<String, String> settings = settings();
    settings.put(SpoolDirCsvSourceConnectorConfig.KEY_SCHEMA_CONF, ObjectMapperFactory.INSTANCE.writeValueAsString(schema));
    settings.put(SpoolDirCsvSourceConnectorConfig.VALUE_SCHEMA_CONF, ObjectMapperFactory.INSTANCE.writeValueAsString(schema));
    settings.put(SpoolDirCsvSourceConnectorConfig.BATCH_SIZE_CONF, "50");
    settings.put(SpoolDirCsvSourceConnectorConfig.INPUT_FILE_PATTERN_CONF, ".*");

    SpoolDirCsvSourceTask task = new SpoolDirCsvSourceTask();
    SourceTaskContext sourceTaskContext = mock(SourceTaskContext.class);
    OffsetStorageReader offsetStorageReader = mock(OffsetStorageReader.class);
    when(offsetStorageReader.offset(anyMap()))
        .thenReturn(null);
    when(sourceTaskContext.offsetStorageReader()).thenReturn(offsetStorageReader);
    task.initialize(sourceTaskContext);
    task.start(settings);

    List<SourceRecord> records = new ArrayList<>();
    records.addAll(task.poll());
    assertEquals(50, records.size());
    SourceRecord lastRecord = records.get(49);
    when(offsetStorageReader.offset(anyMap())).thenReturn((Map<String, Object>) lastRecord.sourceOffset());
    task.stop();
    task.start(settings);

    records.addAll(task.poll());
    assertEquals(count, records.size(), "Expected number of records does not match.");
    assertNull(task.poll(), "Polling should be finished with the file by now.");
    assertNull(task.poll(), "Polling should be finished with the file by now.");
}
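The rebalance is simulated by re-stubbing the mock reader to return the last polled record's sourceOffset() before stopping and restarting the task, which verifies that the task resumes mid-file instead of re-reading it from the beginning.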
Example #14
Source File: MongodbSourceUriTaskTest.java, from kafka-connect-mongodb (Apache License 2.0)
@Override
public void setUp() {
    offsets = new HashMap<>();
    totalWrittenDocuments = 0;
    try {
        super.setUp();
        mongodStarter = MongodStarter.getDefaultInstance();
        mongodConfig = new MongodConfigBuilder()
            .version(Version.Main.V3_2)
            .replication(new Storage(REPLICATION_PATH, "rs0", 1024))
            .net(new Net(12345, Network.localhostIsIPv6()))
            .build();
        mongodExecutable = mongodStarter.prepare(mongodConfig);
        mongod = mongodExecutable.start();

        mongoClient = new MongoClient(new ServerAddress("localhost", 12345));
        MongoDatabase adminDatabase = mongoClient.getDatabase("admin");

        BasicDBObject replicaSetSetting = new BasicDBObject();
        replicaSetSetting.put("_id", "rs0");

        BasicDBList members = new BasicDBList();
        DBObject host = new BasicDBObject();
        host.put("_id", 0);
        host.put("host", "127.0.0.1:12345");
        members.add(host);

        replicaSetSetting.put("members", members);
        adminDatabase.runCommand(new BasicDBObject("isMaster", 1));
        adminDatabase.runCommand(new BasicDBObject("replSetInitiate", replicaSetSetting));

        MongoDatabase db = mongoClient.getDatabase("mydb");
        db.createCollection("test1");
        db.createCollection("test2");
        db.createCollection("test3");
    } catch (Exception e) {
        // Assert.assertTrue(false);
    }

    task = new MongodbSourceTask();

    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);

    sourceProperties = new HashMap<>();
    sourceProperties.put("uri", "mongodb://localhost:12345");
    sourceProperties.put("batch.size", Integer.toString(100));
    sourceProperties.put("schema.name", "schema");
    sourceProperties.put("topic.prefix", "prefix");
    sourceProperties.put("databases", "mydb.test1,mydb.test2,mydb.test3");
}
Example #15
Source File: MongodbSourceTaskTest.java, from kafka-connect-mongodb (Apache License 2.0)
@Override
public void setUp() {
    offsets = new HashMap<>();
    totalWrittenDocuments = 0;
    try {
        super.setUp();
        mongodStarter = MongodStarter.getDefaultInstance();
        mongodConfig = new MongodConfigBuilder()
            .version(Version.Main.V3_2)
            .replication(new Storage(REPLICATION_PATH, "rs0", 1024))
            .net(new Net(12345, Network.localhostIsIPv6()))
            .build();
        mongodExecutable = mongodStarter.prepare(mongodConfig);
        mongod = mongodExecutable.start();

        mongoClient = new MongoClient(new ServerAddress("localhost", 12345));
        MongoDatabase adminDatabase = mongoClient.getDatabase("admin");

        BasicDBObject replicaSetSetting = new BasicDBObject();
        replicaSetSetting.put("_id", "rs0");

        BasicDBList members = new BasicDBList();
        DBObject host = new BasicDBObject();
        host.put("_id", 0);
        host.put("host", "127.0.0.1:12345");
        members.add(host);

        replicaSetSetting.put("members", members);
        adminDatabase.runCommand(new BasicDBObject("isMaster", 1));
        adminDatabase.runCommand(new BasicDBObject("replSetInitiate", replicaSetSetting));

        MongoDatabase db = mongoClient.getDatabase("mydb");
        db.createCollection("test1");
        db.createCollection("test2");
        db.createCollection("test3");
    } catch (Exception e) {
        // Assert.assertTrue(false);
    }

    task = new MongodbSourceTask();

    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);

    sourceProperties = new HashMap<>();
    sourceProperties.put("host", "localhost");
    sourceProperties.put("port", Integer.toString(12345));
    sourceProperties.put("batch.size", Integer.toString(100));
    sourceProperties.put("schema.name", "schema");
    sourceProperties.put("topic.prefix", "prefix");
    sourceProperties.put("databases", "mydb.test1,mydb.test2,mydb.test3");
}
Example #16
Source File: ReadYourWritesOffsetStorageAdapter.java, from kafka-connect-jenkins (Apache License 2.0)
public ReadYourWritesOffsetStorageAdapter(OffsetStorageReader reader, String jobUrls, Partitions ps) {
    storageReader = reader;
    partitions = ps;
    offsets = loadAndGetOffsets(storageReader, jobUrls);
    logger.debug("Loaded offsets: {}", offsets);
}
Example #17
Source File: PulsarIOSourceTaskContext.java, from pulsar (Apache License 2.0)
PulsarIOSourceTaskContext(OffsetStorageReader reader, PulsarKafkaWorkerConfig pulsarKafkaWorkerConfig) {
    this.reader = reader;
    this.pulsarKafkaWorkerConfig = pulsarKafkaWorkerConfig;
}
Example #18
Source File: PulsarIOSourceTaskContext.java, from pulsar (Apache License 2.0)
@Override
public OffsetStorageReader offsetStorageReader() {
    return reader;
}
Example #19
Source File: KafkaSourceTaskTest.java, from MirrorTool-for-Kafka-Connect (Apache License 2.0)
@Test
public void testPollRecordReturnedIncludeHeaders() throws Exception {
    opts.put(KafkaSourceConnectorConfig.INCLUDE_MESSAGE_HEADERS_CONFIG, "true");
    config = new KafkaSourceConnectorConfig(opts);
    objectUnderTest = new KafkaSourceTask();

    offsetStorageReader = createMock(OffsetStorageReader.class);
    context = createMock(SourceTaskContext.class);
    consumer = createMock(KafkaConsumer.class);
    objectUnderTest.initialize(context);

    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
        .andReturn(new HashMap<>());
    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
        .andReturn(consumer);
    EasyMock.expect(consumer.endOffsets(topicPartitions)).andReturn(endOffsets);
    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();

    // expectation for poll
    EasyMock.expect(consumer.poll(Duration.ofMillis(POLL_LOOP_TIMEOUT_MS_VALUE)))
        .andReturn(createTestRecordsWithHeaders());

    replayAll();

    objectUnderTest.start(opts);
    List<SourceRecord> records = objectUnderTest.poll();
    SourceRecord testRecord = records.get(0);
    assertEquals(String.format("%s:%d", FIRST_TOPIC, FIRST_PARTITION), testRecord.sourcePartition().get(TOPIC_PARTITION_KEY));
    assertEquals(FIRST_OFFSET, testRecord.sourceOffset().get(OFFSET_KEY));
    assertEquals(1, testRecord.headers().size());

    verifyAll();
}
Example #20
Source File: AbstractKafkaConnectSource.java, from hazelcast-jet-contrib (Apache License 2.0)
@Override
public OffsetStorageReader offsetStorageReader() {
    return new SourceOffsetStorageReader();
}
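Here the host runtime is Hazelcast Jet rather than a Kafka Connect worker, so the adapter supplies its own OffsetStorageReader implementation (SourceOffsetStorageReader) to feed previously saved offsets to the embedded connector.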
Example #21
Source File: Policy.java, from kafka-connect-fs (Apache License 2.0)
FileReader offer(FileMetadata metadata, OffsetStorageReader offsetStorageReader) throws IOException;