Java Code Examples for org.apache.flink.streaming.api.operators.StreamSource#cancel()

The following examples show how to use org.apache.flink.streaming.api.operators.StreamSource#cancel(). All of them are taken from open source projects; the source file and the originating project are noted above each example.
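Before the project examples, a minimal self-contained sketch may help: StreamSource wraps a user-defined SourceFunction, and StreamSource#cancel() forwards the cancellation to that function's cancel() method, which is what breaks the emit loop. The CountingSource class below is hypothetical, written for illustration only; it is not part of Flink or of the projects cited in the examples.

import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamSource;

public class StreamSourceCancelSketch {

	// hypothetical source that emits an increasing counter until cancelled
	static class CountingSource implements SourceFunction<Long> {
		private volatile boolean running = true;

		@Override
		public void run(SourceContext<Long> ctx) {
			long value = 0;
			while (running) {
				// the SourceFunction contract requires emitting under the checkpoint lock
				synchronized (ctx.getCheckpointLock()) {
					ctx.collect(value++);
				}
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	}

	public static void main(String[] args) {
		StreamSource<Long, CountingSource> operator = new StreamSource<>(new CountingSource());

		// cancel() marks the operator as cancelled and calls CountingSource#cancel(),
		// so a run(...) loop in progress (or started afterwards) exits promptly
		operator.cancel();
	}
}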
Example 1
Source File: StreamSourceOperatorWatermarksTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {

	final List<StreamElement> output = new ArrayList<>();

	// regular stream source operator
	final StreamSource<String, InfiniteSource<String>> operator =
			new StreamSource<>(new InfiniteSource<String>());

	setupSourceOperator(operator, TimeCharacteristic.EventTime, 0);
	operator.cancel();

	// run and exit
	operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<String>(output));

	assertTrue(output.isEmpty());
}
 
Example 2
Source File: StreamSourceOperatorWatermarksTest.java    From flink with Apache License 2.0
@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {

	final List<StreamElement> output = new ArrayList<>();

	// regular stream source operator
	final StreamSource<String, InfiniteSource<String>> operator =
			new StreamSource<>(new InfiniteSource<String>());

	setupSourceOperator(operator, TimeCharacteristic.EventTime, 0);
	operator.cancel();

	// run and exit
	OperatorChain<?, ?> operatorChain = createOperatorChain(operator);
	try {
		operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<String>(output), operatorChain);
	} finally {
		operatorChain.releaseOutputs();
	}

	assertTrue(output.isEmpty());
}
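Examples 1 and 2 show the same test against two Flink versions: the older run(...) signature takes only the checkpoint lock, a StreamStatusMaintainer, and an output collector, while the newer one also takes the surrounding OperatorChain, whose outputs must be released in a finally block. Either way, cancelling before run() means the source emits nothing, not even a final MAX_WATERMARK.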
 
Example 3
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	// assert that partitions exist and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from the legacy checkpoint
	assertTrue(consumerFunction.getRestoredState() != null);
	assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
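Note the teardown shared by this and all of the migration tests below: the operator is first close()d and then cancel()led. Since StreamSource#cancel() also invokes cancel() on the wrapped consumer function, the source is always asked to stop before the test ends.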
 
Example 4
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	// assert that partitions exist and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from the legacy checkpoint
	assertTrue(consumerFunction.getRestoredState() != null);
	assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 5
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that no partitions were found and the list is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 6
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that no partitions were found and the list is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 7
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestoreWithEmptyState() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// although the restored state is empty, the fetcher should still have been registered with the initially discovered shard;
	// furthermore, the discovered shard should be treated as a shard that was newly created while the job wasn't running,
	// and should therefore be consumed from the earliest sequence number
	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 8
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// setup the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));

		// setup the new shards
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));

		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);

		newSplitShard1.setParentShardId(TEST_SHARD_ID);

		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));

		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);

		newSplitShard2.setParentShardId(TEST_SHARD_ID);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());

	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 9
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestore() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));
	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 10
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.x-empty-state-snapshot";
	// all new partitions after the snapshot are considered as partitions that were created while the
	// consumer wasn't running, and should start from the earliest offset.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
	}

	// assert that partitions exist and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// the new partitions should have been considered as restored state
	assertTrue(consumerFunction.getRestoredState() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
		assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
	}

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 11
Source File: FlinkKinesisConsumerMigrationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// setup the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));

		// setup the new shards
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));

		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);

		newSplitShard1.setParentShardId(TEST_SHARD_ID);

		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));

		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);

		newSplitShard2.setParentShardId(TEST_SHARD_ID);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());

	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 12
Source File: FlinkKinesisConsumerMigrationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRestore() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));
	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 13
Source File: FlinkKinesisConsumerMigrationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRestoreWithEmptyState() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// although the restored state is empty, the fetcher should still have been registered with the initially discovered shard;
	// furthermore, the discovered shard should be treated as a shard that was newly created while the job wasn't running,
	// and should therefore be consumed from the earliest sequence number
	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 14
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.2-empty-state-snapshot";
	// all new partitions after the snapshot are considered as partitions that were created while the
	// consumer wasn't running, and should start from the earliest offset.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
	}

	// assert that partitions exist and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// the new partitions should have been considered as restored state
	assertTrue(consumerFunction.getRestoredState() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
		assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
	}

	consumerOperator.close();
	consumerOperator.cancel();
}