org.apache.kafka.connect.sink.SinkTaskContext Java Examples
The following examples show how to use org.apache.kafka.connect.sink.SinkTaskContext. Each example is drawn from an open-source project; the source file, project, and license appear above the code.
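Before the project examples, here is a minimal, self-contained sketch (not drawn from any project below; the class name and offsets are illustrative) showing the SinkTaskContext methods these examples exercise most: assignment() to discover the task's partitions and offset() to reposition the consumer. The protected context field is populated by SinkTask.initialize().

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class ExampleSinkTask extends SinkTask {

  @Override
  public String version() {
    return "0.0.1";
  }

  @Override
  public void start(Map<String, String> props) {
    // assignment() returns the topic partitions the framework assigned to this task.
    for (TopicPartition tp : context.assignment()) {
      // Rewind each partition to offset 0 so the sink reprocesses from the beginning.
      context.offset(tp, 0L);
    }
  }

  @Override
  public void put(Collection<SinkRecord> records) {
    // Deliver records to the target system here.
  }

  @Override
  public void stop() {
  }
}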
Example #1
Source File: RedisSinkTaskReconnectIT.java From kafka-connect-redis with Apache License 2.0
@Test
public void initialConnectionIssues(
    @DockerContainer(container = "redis") Container container,
    @Port(container = "redis", internalPort = 6379) InetSocketAddress address)
    throws ExecutionException, InterruptedException, IOException {
  log.info("address = {}", address);
  final String topic = "putWrite";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of());
  this.task.initialize(context);
  container.stop();
  ExecutorService service = Executors.newSingleThreadExecutor();
  Future<?> future = service.submit(() -> task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG,
          String.format("%s:%s", address.getHostString(), address.getPort())
      )
  ));
  container.start();
  Time.SYSTEM.sleep(2000);
  future.get();
}
Example #2
Source File: RedisSinkTaskReconnectIT.java From kafka-connect-redis with Apache License 2.0
@Test
public void serverReset(
    @DockerContainer(container = "redis") Container container,
    @Port(container = "redis", internalPort = 6379) InetSocketAddress address)
    throws ExecutionException, InterruptedException, IOException {
  log.info("address = {}", address);
  final String topic = "putWrite";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of());
  this.task.initialize(context);
  this.task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG,
          String.format("%s:%s", address.getHostString(), address.getPort()))
  );
  sendAndVerifyRecords(task, topic, 0);
  container.stop();
  assertThrows(RetriableException.class, () -> {
    sendAndVerifyRecords(task, topic, 100);
  });
  container.start();
  sendAndVerifyRecords(task, topic, 100);
}
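Example #2 asserts that a RetriableException surfaces while the Redis container is down. As a hedged sketch of the general pattern such a task follows (not the project's actual code; the class name and backoff value are illustrative), put() converts a transient failure into a RetriableException and may call context.timeout() so Kafka Connect backs off before redelivering the batch:

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class RetryingSinkTask extends SinkTask {

  @Override
  public String version() {
    return "0.0.1";
  }

  @Override
  public void start(Map<String, String> props) {
  }

  @Override
  public void put(Collection<SinkRecord> records) {
    try {
      write(records);
    } catch (RuntimeException e) {
      // Ask the framework to wait one second, then redeliver the same batch.
      context.timeout(1000L);
      throw new RetriableException("Transient write failure, batch will be retried", e);
    }
  }

  private void write(Collection<SinkRecord> records) {
    // Hypothetical write to the target system; may throw on connection loss.
  }

  @Override
  public void stop() {
  }
}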
Example #3
Source File: LambdaSinkTaskTest.java From kafka-connect-lambda with Apache License 2.0
@Ignore("Test is ignored as a demonstration -- needs credentials")
@Test
public void testPutWhenBatchingIsNotEnabled() {
  ImmutableMap<String, String> props =
      new ImmutableMap.Builder<String, String>()
          .put("connector.class", "com.nordstrom.kafka.connect.lambda.LambdaSinkConnector")
          .put("tasks.max", "1")
          .put("aws.lambda.function.arn", "arn:aws:lambda:us-west-2:123456789123:function:test-lambda")
          .put("aws.lambda.invocation.timeout.ms", "300000")
          .put("aws.lambda.invocation.mode", "SYNC")
          .put("aws.lambda.batch.enabled", "false")
          .put("key.converter", "org.apache.kafka.connect.storage.StringConverter")
          .put("value.converter", "org.apache.kafka.connect.storage.StringConverter")
          .put("topics", "connect-lambda-test")
          .build();

  LambdaSinkTask task = new LambdaSinkTask();
  task.initialize(mock(SinkTaskContext.class));
  task.start(props);

  InvocationClient mockedClient = mock(InvocationClient.class);
  when(mockedClient.invoke(any()))
      .thenReturn(new InvocationResponse(200, "test log", "", Instant.now(), Instant.now()));

  Schema testSchema = SchemaBuilder.struct()
      .name("com.nordstrom.kafka.connect.lambda.foo")
      .field("bar", STRING_SCHEMA)
      .build();

  SinkRecord testRecord = new SinkRecord("connect-lambda-test", 0, STRING_SCHEMA, "sometestkey",
      testSchema, "testing", 0, 0L, TimestampType.CREATE_TIME);
  Collection<SinkRecord> testList = new ArrayList<>();
  testList.add(testRecord);

  task.put(testList);
}
Example #4
Source File: RedisSinkTaskIT.java From kafka-connect-redis with Apache License 2.0
@Test
public void emptyAssignment(@Port(container = "redis", internalPort = 6379) InetSocketAddress address)
    throws ExecutionException, InterruptedException {
  log.info("address = {}", address);
  final String topic = "putWrite";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of());
  this.task.initialize(context);
  this.task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG,
          String.format("%s:%s", address.getHostString(), address.getPort()))
  );
}
Example #5
Source File: RedisSinkTaskIT.java From kafka-connect-redis with Apache License 2.0
@Test
public void putEmpty(@Port(container = "redis", internalPort = 6379) InetSocketAddress address)
    throws ExecutionException, InterruptedException {
  log.info("address = {}", address);
  final String topic = "putWrite";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of(new TopicPartition(topic, 1)));
  this.task.initialize(context);
  this.task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG,
          String.format("%s:%s", address.getHostString(), address.getPort()))
  );
  this.task.put(ImmutableList.of());
}
Example #6
Source File: TopicPartitionWriter.java From streamx with Apache License 2.0
public TopicPartitionWriter(
    TopicPartition tp,
    Storage storage,
    RecordWriterProvider writerProvider,
    Partitioner partitioner,
    HdfsSinkConnectorConfig connectorConfig,
    SinkTaskContext context,
    AvroData avroData) {
  this(tp, storage, writerProvider, partitioner, connectorConfig, context, avroData,
      null, null, null, null, null);
}
Example #7
Source File: AmazonKinesisSinkTask.java From kinesis-kafka-connector with Apache License 2.0
@Override
public void initialize(SinkTaskContext context) {
  sinkTaskContext = context;
}
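Example #7 only stores the context; the payoff comes later, when the task needs flow control. A hedged sketch (the class and the bufferFull() signal are illustrative, not the Kinesis connector's actual logic) of using a saved context to pause and resume assigned partitions under backpressure:

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.apache.kafka.connect.sink.SinkTaskContext;

public class BackpressureAwareSinkTask extends SinkTask {
  private SinkTaskContext sinkTaskContext;
  private boolean paused = false;

  @Override
  public String version() {
    return "0.0.1";
  }

  @Override
  public void initialize(SinkTaskContext context) {
    super.initialize(context);
    this.sinkTaskContext = context; // same pattern as Example #7
  }

  @Override
  public void start(Map<String, String> props) {
  }

  @Override
  public void put(Collection<SinkRecord> records) {
    TopicPartition[] assigned = sinkTaskContext.assignment().toArray(new TopicPartition[0]);
    if (bufferFull() && !paused) {
      sinkTaskContext.pause(assigned);   // stop consuming until the sink drains
      paused = true;
    } else if (!bufferFull() && paused) {
      sinkTaskContext.resume(assigned);  // resume delivery to put()
      paused = false;
    }
    // Hand records to the downstream client here.
  }

  private boolean bufferFull() {
    return false; // hypothetical backpressure signal from the downstream client
  }

  @Override
  public void stop() {
  }
}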
Example #8
Source File: RedisSinkTaskIT.java From kafka-connect-redis with Apache License 2.0
@Test
public void putDelete(@Port(container = "redis", internalPort = 6379) InetSocketAddress address)
    throws ExecutionException, InterruptedException {
  log.info("address = {}", address);
  final String topic = "putDelete";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of(new TopicPartition(topic, 1)));
  this.task.initialize(context);
  this.task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG,
          String.format("%s:%s", address.getHostString(), address.getPort()))
  );

  final int count = 50;
  final Map<String, String> expected = new LinkedHashMap<>(count);
  final List<SinkRecord> records = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    final String key = String.format("putDelete%s", i);
    final String value = String.format("This is value %s", i);
    records.add(
        delete(topic, new SchemaAndValue(Schema.STRING_SCHEMA, key))
    );
    expected.put(key, value);
  }

  final Map<byte[], byte[]> values = expected.entrySet().stream()
      .collect(Collectors.toMap(
          kv -> kv.getKey().getBytes(Charsets.UTF_8),
          kv -> kv.getValue().getBytes(Charsets.UTF_8)
      ));
  this.task.session.asyncCommands().mset(values).get();

  this.task.put(records);

  final byte[][] keys = expected.keySet().stream()
      .map(s -> s.getBytes(Charsets.UTF_8))
      .toArray(byte[][]::new);
  final long actual = this.task.session.asyncCommands().exists(keys).get();
  assertEquals(0L, actual, "All of the keys should be removed from Redis.");
}
Example #9
Source File: TopicPartitionWriter.java From streamx with Apache License 2.0
public TopicPartitionWriter(
    TopicPartition tp,
    Storage storage,
    RecordWriterProvider writerProvider,
    Partitioner partitioner,
    HdfsSinkConnectorConfig connectorConfig,
    SinkTaskContext context,
    AvroData avroData,
    HiveMetaStore hiveMetaStore,
    HiveUtil hive,
    SchemaFileReader schemaFileReader,
    ExecutorService executorService,
    Queue<Future<Void>> hiveUpdateFutures) {
  this.tp = tp;
  this.connectorConfig = connectorConfig;
  this.context = context;
  this.avroData = avroData;
  this.storage = storage;
  this.writerProvider = writerProvider;
  this.partitioner = partitioner;
  this.url = storage.url();
  this.conf = storage.conf();
  this.schemaFileReader = schemaFileReader;

  topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  flushSize = connectorConfig.getInt(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG);
  rotateIntervalMs = connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG);
  rotateScheduleIntervalMs = connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_SCHEDULE_INTERVAL_MS_CONFIG);
  timeoutMs = connectorConfig.getLong(HdfsSinkConnectorConfig.RETRY_BACKOFF_CONFIG);
  compatibility = SchemaUtils.getCompatibility(
      connectorConfig.getString(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG));

  String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);
  wal = storage.wal(logsDir, tp);

  buffer = new LinkedList<>();
  writers = new HashMap<>();
  tempFiles = new HashMap<>();
  appended = new HashSet<>();
  startOffsets = new HashMap<>();
  offsets = new HashMap<>();
  state = State.RECOVERY_STARTED;
  failureTime = -1L;
  offset = -1L;
  sawInvalidOffset = false;
  extension = writerProvider.getExtension();
  zeroPadOffsetFormat = "%0"
      + connectorConfig.getInt(HdfsSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG)
      + "d";

  hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
  if (hiveIntegration) {
    hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
    this.hiveMetaStore = hiveMetaStore;
    this.hive = hive;
    this.executorService = executorService;
    this.hiveUpdateFutures = hiveUpdateFutures;
    hivePartitions = new HashSet<>();
  }

  if (rotateScheduleIntervalMs > 0) {
    timeZone = DateTimeZone.forID(connectorConfig.getString(HdfsSinkConnectorConfig.TIMEZONE_CONFIG));
  }

  // Initialize rotation timers
  updateRotationTimers();
}
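The writer tracks per-partition offset state (startOffsets, offsets, offset) so that, after write-ahead-log recovery, the framework's consumer can be repositioned. As a hedged sketch of that general rewind pattern (a hypothetical helper, not this project's recovery code), SinkTaskContext.offset(Map) is the call that performs the reset:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkTaskContext;

final class OffsetRewindHelper {
  // Rewind the framework's consumer so it redelivers everything after the
  // last offset known to be safely written to storage.
  static void rewindToCommitted(SinkTaskContext context,
                                Map<TopicPartition, Long> lastCommitted) {
    Map<TopicPartition, Long> restart = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : lastCommitted.entrySet()) {
      restart.put(entry.getKey(), entry.getValue() + 1); // next offset to consume
    }
    context.offset(restart); // offset(Map) resets the consumer's positions
  }
}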
Example #10
Source File: AvroHiveUtilTest.java From streamx with Apache License 2.0
private DataWriter createWriter(SinkTaskContext context, AvroData avroData) {
  return new DataWriter(connectorConfig, context, avroData);
}
Example #11
Source File: ParquetHiveUtilTest.java From streamx with Apache License 2.0
private DataWriter createWriter(SinkTaskContext context, AvroData avroData) {
  Map<String, String> props = createProps();
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);
  return new DataWriter(connectorConfig, context, avroData);
}
Example #12
Source File: PostgreSQLSinkTask.java From kafka-sink-pg-json with MIT License
/**
 * Initialise sink task.
 *
 * @param context context of the sink task
 */
@Override
public void initialize(SinkTaskContext context) {
  iTaskContext = context; // save task context
}