Java Code Examples for org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition

The following examples show how to use org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition. These examples are extracted from open source projects; the source project, source file, and license are noted above each example where available.
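Before diving into the examples, here is a minimal usage sketch. A KafkaTopicPartition simply pairs a topic name with a partition index and is most often used as the key of a startup-offset map handed to a Flink Kafka consumer. The topic name, partition numbers, offsets, and the kafkaProperties setup below are placeholders for illustration, not taken from any of the projects listed here.

// Sketch (placeholder values): start a FlinkKafkaConsumer from explicit per-partition offsets.
Properties kafkaProperties = new Properties();
kafkaProperties.setProperty("bootstrap.servers", "localhost:9092");
kafkaProperties.setProperty("group.id", "example-group");

Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
specificStartupOffsets.put(new KafkaTopicPartition("example-topic", 0), 23L);
specificStartupOffsets.put(new KafkaTopicPartition("example-topic", 1), 31L);

FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
		"example-topic",
		new SimpleStringSchema(),
		kafkaProperties);
// The consumer will read partition 0 from offset 23 and partition 1 from offset 31.
consumer.setStartFromSpecificOffsets(specificStartupOffsets);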
Example 1
@Override
protected KafkaTableSourceBase createKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka09TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets);
}
 
Example 2
Source Project: Flink-CEPplus   Source File: Kafka09PartitionDiscoverer.java    License: Apache License 2.0
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException {
	List<KafkaTopicPartition> partitions = new LinkedList<>();

	try {
		for (String topic : topics) {
			for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
				partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}
		}
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new WakeupException();
	}

	return partitions;
}
 
Example 3
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
@Override
protected AbstractPartitionDiscoverer createPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks) {

	AbstractPartitionDiscoverer mockPartitionDiscoverer = mock(AbstractPartitionDiscoverer.class);

	try {
		when(mockPartitionDiscoverer.discoverPartitions()).thenReturn(partitions);
	} catch (Exception e) {
		// ignore
	}
	when(mockPartitionDiscoverer.setAndCheckDiscoveredPartition(any(KafkaTopicPartition.class))).thenReturn(true);

	return mockPartitionDiscoverer;
}
 
Example 4
Source Project: Flink-CEPplus   Source File: KafkaConsumerTestBase.java    License: Apache License 2.0
/**
 * Variant of {@link KafkaConsumerTestBase#readSequence(StreamExecutionEnvironment, StartupMode, Map, Long, Properties, String, Map)} that
 * expects reading from the same start offset and the same value count for all partitions of a single Kafka topic.
 */
protected void readSequence(final StreamExecutionEnvironment env,
							final StartupMode startupMode,
							final Map<KafkaTopicPartition, Long> specificStartupOffsets,
							final Long startupTimestamp,
							final Properties cc,
							final int sourceParallelism,
							final String topicName,
							final int valuesCount,
							final int startFrom) throws Exception {
	HashMap<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset = new HashMap<>();
	for (int i = 0; i < sourceParallelism; i++) {
		partitionsToValuesCountAndStartOffset.put(i, new Tuple2<>(valuesCount, startFrom));
	}
	readSequence(env, startupMode, specificStartupOffsets, startupTimestamp, cc, topicName, partitionsToValuesCountAndStartOffset);
}
 
Example 5
@Override
protected KafkaTableSourceBase createKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka08TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets);
}
 
Example 6
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka08TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets
	);
}
 
Example 7
Source Project: flink   Source File: KafkaDynamicSourceBase.java    License: Apache License 2.0
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param outputDataType         Source produced data type
 * @param topic                  Kafka topic to consume.
 * @param properties             Properties for the Kafka consumer.
 * @param decodingFormat         Decoding format for decoding records from Kafka.
 * @param startupMode            Startup mode for the contained consumer.
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 * @param startupTimestampMillis Startup timestamp for offsets; only relevant when startup
 *                               mode is {@link StartupMode#TIMESTAMP}.
 */
protected KafkaDynamicSourceBase(
		DataType outputDataType,
		String topic,
		Properties properties,
		DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestampMillis) {
	this.outputDataType = Preconditions.checkNotNull(
			outputDataType, "Produced data type must not be null.");
	this.topic = Preconditions.checkNotNull(topic, "Topic must not be null.");
	this.properties = Preconditions.checkNotNull(properties, "Properties must not be null.");
	this.decodingFormat = Preconditions.checkNotNull(
		decodingFormat, "Decoding format must not be null.");
	this.startupMode = Preconditions.checkNotNull(startupMode, "Startup mode must not be null.");
	this.specificStartupOffsets = Preconditions.checkNotNull(
		specificStartupOffsets, "Specific offsets must not be null.");
	this.startupTimestampMillis = startupTimestampMillis;
}
 
Example 8
@Override
protected KafkaTableSourceBase createKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka011TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets);
}
 
Example 9
Source Project: Flink-CEPplus   Source File: Kafka011TableSource.java    License: Apache License 2.0
/**
 * Creates a Kafka 0.11 {@link StreamTableSource}.
 *
 * @param schema                      Schema of the produced table.
 * @param proctimeAttribute           Field name of the processing time attribute.
 * @param rowtimeAttributeDescriptors Descriptor for a rowtime attribute
 * @param fieldMapping                Mapping for the fields of the table schema to
 *                                    fields of the physical returned type.
 * @param topic                       Kafka topic to consume.
 * @param properties                  Properties for the Kafka consumer.
 * @param deserializationSchema       Deserialization schema for decoding records from Kafka.
 * @param startupMode                 Startup mode for the contained consumer.
 * @param specificStartupOffsets      Specific startup offsets; only relevant when startup
 *                                    mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 */
public Kafka011TableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Optional<Map<String, String>> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	super(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		fieldMapping,
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets);
}
 
Example 10
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka011TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets
	);
}
 
Example 11
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		AbstractFetcher<T, ?> fetcher,
		List<String> topics,
		List<KafkaTopicPartition> partitions,
		long discoveryInterval) {

	super(
		topics,
		null,
		(KafkaDeserializationSchema<T>) mock(KafkaDeserializationSchema.class),
		discoveryInterval,
		false);

	this.fetcher = fetcher;
	this.partitions = partitions;
}
 
Example 12
Source Project: flink   Source File: KafkaPartitionDiscoverer.java    License: Apache License 2.0
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException, RuntimeException {
	final List<KafkaTopicPartition> partitions = new LinkedList<>();

	try {
		for (String topic : topics) {
			final List<PartitionInfo> kafkaPartitions = kafkaConsumer.partitionsFor(topic);

			if (kafkaPartitions == null) {
				throw new RuntimeException("Could not fetch partitions for %s. Make sure that the topic exists.".format(topic));
			}

			for (PartitionInfo partitionInfo : kafkaPartitions) {
				partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}
		}
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new WakeupException();
	}

	return partitions;
}
 
Example 13
@SuppressWarnings("unchecked")
DummyFlinkKafkaConsumer(
		AbstractFetcher<T, ?> fetcher,
		List<String> topics,
		List<KafkaTopicPartition> partitions,
		long discoveryInterval) {

	super(
		topics,
		null,
		(KafkaDeserializationSchema<T>) mock(KafkaDeserializationSchema.class),
		discoveryInterval,
		false);

	this.fetcher = fetcher;
	this.partitions = partitions;
}
 
Example 14
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0
/**
 * Tests that no checkpoints happen when the fetcher is not running.
 */
@Test
public void ignoreCheckpointWhenNotRunning() throws Exception {
	@SuppressWarnings("unchecked")
	final MockFetcher<String> fetcher = new MockFetcher<>();
	final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(
			fetcher,
			mock(AbstractPartitionDiscoverer.class),
			false);

	final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
	setupConsumer(consumer, false, listState, true, 0, 1);

	// snapshot before the fetcher starts running
	consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));

	// no state should have been checkpointed
	assertFalse(listState.get().iterator().hasNext());

	// acknowledgement of the checkpoint should also not result in any offset commits
	consumer.notifyCheckpointComplete(1L);
	assertNull(fetcher.getAndClearLastCommittedOffsets());
	assertEquals(0, fetcher.getCommitCount());
}
 
Example 15
Source Project: flink   Source File: KafkaDynamicSource.java    License: Apache License 2.0
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param outputDataType         Source output data type
 * @param topic                  Kafka topic to consume
 * @param properties             Properties for the Kafka consumer
 * @param decodingFormat         Decoding format for decoding records from Kafka
 * @param startupMode            Startup mode for the contained consumer
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}
 */
public KafkaDynamicSource(
		DataType outputDataType,
		String topic,
		Properties properties,
		DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestampMillis) {

	super(
		outputDataType,
		topic,
		properties,
		decodingFormat,
		startupMode,
		specificStartupOffsets,
		startupTimestampMillis);
}
 
Example 16
Source Project: flink   Source File: KafkaTableSourceBase.java    License: Apache License 2.0
/**
 * Creates a generic Kafka {@link StreamTableSource}.
 *
 * @param schema                      Schema of the produced table.
 * @param proctimeAttribute           Field name of the processing time attribute.
 * @param rowtimeAttributeDescriptors Descriptor for a rowtime attribute
 * @param fieldMapping                Mapping for the fields of the table schema to
 *                                    fields of the physical returned type.
 * @param topic                       Kafka topic to consume.
 * @param properties                  Properties for the Kafka consumer.
 * @param deserializationSchema       Deserialization schema for decoding records from Kafka.
 * @param startupMode                 Startup mode for the contained consumer.
 * @param specificStartupOffsets      Specific startup offsets; only relevant when startup
 *                                    mode is {@link StartupMode#SPECIFIC_OFFSETS}.
 * @param startupTimestampMillis      Startup timestamp for offsets; only relevant when startup
 *                                    mode is {@link StartupMode#TIMESTAMP}.
 */
protected KafkaTableSourceBase(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Optional<Map<String, String>> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestampMillis) {
	this.schema = TableSchemaUtils.checkNoGeneratedColumns(schema);
	this.proctimeAttribute = validateProctimeAttribute(proctimeAttribute);
	this.rowtimeAttributeDescriptors = validateRowtimeAttributeDescriptors(rowtimeAttributeDescriptors);
	this.fieldMapping = fieldMapping;
	this.topic = Preconditions.checkNotNull(topic, "Topic must not be null.");
	this.properties = Preconditions.checkNotNull(properties, "Properties must not be null.");
	this.deserializationSchema = Preconditions.checkNotNull(
		deserializationSchema, "Deserialization schema must not be null.");
	this.startupMode = Preconditions.checkNotNull(startupMode, "Startup mode must not be null.");
	this.specificStartupOffsets = Preconditions.checkNotNull(
		specificStartupOffsets, "Specific offsets must not be null.");
	this.startupTimestampMillis = startupTimestampMillis;
}
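As the javadoc above points out, specificStartupOffsets is only consulted when startupMode is StartupMode.SPECIFIC_OFFSETS, and startupTimestampMillis only when it is StartupMode.TIMESTAMP. A hedged sketch of how a caller might prepare these three arguments for the specific-offsets case (topic name, partitions, and offsets are invented for illustration):

// Only the arguments relevant to the chosen startup mode carry meaning.
StartupMode startupMode = StartupMode.SPECIFIC_OFFSETS;

Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
specificStartupOffsets.put(new KafkaTopicPartition("example-topic", 0), 42L);
specificStartupOffsets.put(new KafkaTopicPartition("example-topic", 1), 7L);

// Ignored for SPECIFIC_OFFSETS; it would only be read if startupMode were TIMESTAMP.
long startupTimestampMillis = 0L;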
 
Example 17
Source Project: flink   Source File: Kafka010DynamicSource.java    License: Apache License 2.0
/**
 * Creates a Kafka 0.10 {@link StreamTableSource}.
 *
 * @param outputDataType         Source output data type
 * @param topic                  Kafka topic to consume
 * @param properties             Properties for the Kafka consumer
 * @param decodingFormat         Decoding format for decoding records from Kafka
 * @param startupMode            Startup mode for the contained consumer
 * @param specificStartupOffsets Specific startup offsets; only relevant when startup
 *                               mode is {@link StartupMode#SPECIFIC_OFFSETS}
 * @param startupTimestampMillis Startup timestamp for offsets; only relevant when startup
 *                               mode is {@link StartupMode#TIMESTAMP}
 */
public Kafka010DynamicSource(
		DataType outputDataType,
		String topic,
		Properties properties,
		DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestampMillis) {

	super(
		outputDataType,
		topic,
		properties,
		decodingFormat,
		startupMode,
		specificStartupOffsets,
		startupTimestampMillis);
}
 
Example 18
Source Project: flink   Source File: Kafka011TableSourceSinkFactoryTest.java    License: Apache License 2.0
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestampMillis) {

	return new Kafka011TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets,
		startupTimestampMillis
	);
}
 
Example 19
Source Project: flink   Source File: KafkaTableSourceSinkFactory.java    License: Apache License 2.0
@Override
protected KafkaTableSourceBase createKafkaTableSource(
	TableSchema schema,
	Optional<String> proctimeAttribute,
	List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
	Map<String, String> fieldMapping,
	String topic,
	Properties properties,
	DeserializationSchema<Row> deserializationSchema,
	StartupMode startupMode,
	Map<KafkaTopicPartition, Long> specificStartupOffsets,
	long startupTimestampMillis) {

	return new KafkaTableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets,
		startupTimestampMillis);
}
 
Example 20
Source Project: flink   Source File: Kafka010TableSourceSinkFactoryTest.java    License: Apache License 2.0
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
		TableSchema schema,
		Optional<String> proctimeAttribute,
		List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
		Map<String, String> fieldMapping,
		String topic,
		Properties properties,
		DeserializationSchema<Row> deserializationSchema,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets) {

	return new Kafka010TableSource(
		schema,
		proctimeAttribute,
		rowtimeAttributeDescriptors,
		Optional.of(fieldMapping),
		topic,
		properties,
		deserializationSchema,
		startupMode,
		specificStartupOffsets
	);
}
 
Example 21
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumer09.java    License: Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {

	// make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
	// this overwrites whatever setting the user configured in the properties
	adjustAutoCommitConfig(properties, offsetCommitMode);

	// If a rateLimiter is set, then call rateLimiter.open() with the runtime context.
	if (rateLimiter != null) {
		rateLimiter.open(runtimeContext);
	}

	return new Kafka09Fetcher<>(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext.getProcessingTimeService(),
			runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
			runtimeContext.getUserCodeClassLoader(),
			runtimeContext.getTaskNameWithSubtasks(),
			deserializer,
			properties,
			pollTimeout,
			runtimeContext.getMetricGroup(),
			consumerMetricGroup,
			useMetrics,
			rateLimiter);
}
 
Example 22
Source Project: flink   Source File: FlinkKafkaConsumer010.java    License: Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
		Collection<KafkaTopicPartition> partitions,
		long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());

	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}
	}

	return result;
}
 
Example 23
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
/**
 * Tests restoring from a legacy empty state when no partitions could be found for the topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that no partitions were found and the subscribed partition map is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 24
Source Project: flink   Source File: KafkaDynamicTableFactoryTestBase.java    License: Apache License 2.0
protected abstract KafkaDynamicSourceBase getExpectedScanSource(
		DataType producedDataType,
		String topic,
		Properties properties,
		DecodingFormat<DeserializationSchema<RowData>> decodingFormat,
		StartupMode startupMode,
		Map<KafkaTopicPartition, Long> specificStartupOffsets,
		long startupTimestamp
);
 
Example 25
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumer08.java    License: Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {

	long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC)
			? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000)
			: -1; // this disables the periodic offset committer thread in the fetcher

	return new Kafka08Fetcher<>(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext,
			deserializer,
			kafkaProperties,
			autoCommitInterval,
			consumerMetricGroup,
			useMetrics);
}
 
Example 26
Source Project: flink   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
		Map<KafkaTopicPartition, Long> offsets,
		@Nonnull KafkaCommitCallback commitCallback) throws Exception {
	this.lastCommittedOffsets = offsets;
	this.commitCount++;
	commitCallback.onSuccess();
}
 
Example 27
Source Project: Flink-CEPplus   Source File: TestPartitionDiscoverer.java    License: Apache License 2.0
public TestPartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		List<List<String>> mockGetAllTopicsReturnSequence,
		List<List<KafkaTopicPartition>> mockGetAllPartitionsForTopicsReturnSequence) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	this.topicsDescriptor = topicsDescriptor;
	this.mockGetAllTopicsReturnSequence = mockGetAllTopicsReturnSequence;
	this.mockGetAllPartitionsForTopicsReturnSequence = mockGetAllPartitionsForTopicsReturnSequence;
}
 
Example 28
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumer.java    License: Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
	Collection<KafkaTopicPartition> partitions,
	long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}

	}
	return result;
}
 
Example 29
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
/**
 * Tests that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
	assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, 1000L); // discovery enabled

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file; should fail since discovery is enabled
	try {
		testHarness.initializeState(
			OperatorSnapshotUtil.getResourceFilename(
				"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

		fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
	} catch (Exception e) {
		Assert.assertTrue(e instanceof IllegalArgumentException);
	}
}
 
Example 30
Source Project: flink   Source File: TestPartitionDiscoverer.java    License: Apache License 2.0
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) {
	if (topicsDescriptor.isFixedTopics()) {
		assertEquals(topicsDescriptor.getFixedTopics(), topics);
	} else {
		assertEquals(mockGetAllTopicsReturnSequence.get(getAllPartitionsForTopicsInvokeCount - 1), topics);
	}
	return mockGetAllPartitionsForTopicsReturnSequence.get(getAllPartitionsForTopicsInvokeCount++);
}