Java Code Examples for org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness

The following examples show how to use org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness. These examples are extracted from open source projects; the source project and file are noted above each example where available.
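Every example below follows the same basic lifecycle: wrap the function under test in its stream operator, construct the harness with (operator, maxParallelism, parallelism, subtaskIndex), optionally restore a snapshot, open the harness, exercise the operator, snapshot, and close. As a quick orientation, here is a minimal sketch of that round trip; the no-op source is a placeholder invented for illustration and is not taken from any of the projects below.

@Test
public void harnessLifecycleSketch() throws Exception {
	// placeholder no-op source, for illustration only
	SourceFunction<String> source = new SourceFunction<String>() {
		@Override
		public void run(SourceContext<String> ctx) {
		}

		@Override
		public void cancel() {
		}
	};

	final AbstractStreamOperatorTestHarness<String> harness =
		new AbstractStreamOperatorTestHarness<>(
			new StreamSource<>(source), 1, 1, 0); // maxParallelism, parallelism, subtaskIndex

	harness.setup();
	harness.open(); // opens the wrapped operator and, through it, the user function

	// take a checkpoint; a fresh harness could call initializeState(snapshot)
	// before open() to simulate a restore, as several examples below do
	OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);

	harness.close();
}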
Example 1
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that no partitions were found and the subscribed partition map is empty
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 2
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	// assert that partitions were restored and the subscribed partition map is non-empty
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertFalse(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from legacy checkpoint
	assertNotNull(consumerFunction.getRestoredState());
	assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 3
Source Project: flink   Source File: FlinkKafkaConsumerBaseMigrationTest.java    License: Apache License 2.0
/**
 * Test that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
	assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, 1000L); // discovery enabled

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file; should fail since discovery is enabled
	try {
		testHarness.initializeState(
			OperatorSnapshotUtil.getResourceFilename(
				"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

		fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
	} catch (Exception e) {
		Assert.assertTrue(e instanceof IllegalArgumentException);
	}
}
 
Example 4
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
	SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

	AbstractStreamOperatorTestHarness<T> testHarness =
		new AbstractStreamOperatorTestHarness<>(
			new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

	testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

	return testHarness;
}
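A helper like this is typically invoked once per simulated subtask. A hypothetical call site for a two-subtask setup (MyTestSource is a placeholder, not part of the test above):

// one harness per simulated subtask of a source running at parallelism 2
AbstractStreamOperatorTestHarness<String> subtask0 = createTestHarness(new MyTestSource(), 2, 0);
AbstractStreamOperatorTestHarness<String> subtask1 = createTestHarness(new MyTestSource(), 2, 1);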
 
Example 5
Source Project: Flink-CEPplus   Source File: ListCheckpointedTest.java    License: Apache License 2.0
private static void testUDF(TestUserFunction userFunction) throws Exception {
	OperatorSubtaskState snapshot;
	try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
		testHarness.open();
		snapshot = testHarness.snapshot(0L, 0L);
		assertFalse(userFunction.isRestored());
	}
	try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
		testHarness.initializeState(snapshot);
		testHarness.open();
		assertTrue(userFunction.isRestored());
	}
}
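The TestUserFunction itself is not shown on this page. A minimal sketch of what such a function could look like, assuming a pass-through map function that tracks restores via the legacy ListCheckpointed interface (illustrative only; the real class lives in Flink's ListCheckpointedTest):

private static class TestUserFunctionSketch
		implements MapFunction<Integer, Integer>, ListCheckpointed<Integer> {

	private boolean restored;

	@Override
	public Integer map(Integer value) {
		return value;
	}

	@Override
	public List<Integer> snapshotState(long checkpointId, long timestamp) {
		// return whatever state should survive the snapshot/restore round trip
		return Collections.singletonList(42);
	}

	@Override
	public void restoreState(List<Integer> state) {
		restored = true; // flipped when initializeState() hands a snapshot back
	}

	public boolean isRestored() {
		return restored;
	}
}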
 
Example 6
Source Project: flink   Source File: CoBroadcastWithNonKeyedOperatorTest.java    License: Apache License 2.0
private static OperatorSubtaskState repartitionInitState(
		final OperatorSubtaskState initState,
		final int numKeyGroups,
		final int oldParallelism,
		final int newParallelism,
		final int subtaskIndex
) {
	return AbstractStreamOperatorTestHarness.repartitionOperatorState(initState, numKeyGroups, oldParallelism, newParallelism, subtaskIndex);
}
 
Example 7
Source Project: pulsar-flink   Source File: FlinkPulsarSourceTest.java    License: Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
            new AbstractStreamOperatorTestHarness<>(
                    new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
 
Example 8
Source Project: flink   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
	SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

	AbstractStreamOperatorTestHarness<T> testHarness =
		new AbstractStreamOperatorTestHarness<>(
			new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

	testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

	return testHarness;
}
 
Example 9
Source Project: flink   Source File: ListCheckpointedTest.java    License: Apache License 2.0
private static void testUDF(TestUserFunction userFunction) throws Exception {
	OperatorSubtaskState snapshot;
	try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
		testHarness.open();
		snapshot = testHarness.snapshot(0L, 0L);
		assertFalse(userFunction.isRestored());
	}
	try (AbstractStreamOperatorTestHarness<Integer> testHarness = createTestHarness(userFunction)) {
		testHarness.initializeState(snapshot);
		testHarness.open();
		assertTrue(userFunction.isRestored());
	}
}
 
Example 10
Source Project: flink   Source File: RMQSourceTest.java    License: Apache License 2.0
@Test
public void testOpen() throws Exception {
	MockDeserializationSchema<String> deserializationSchema = new MockDeserializationSchema<>();

	RMQSource<String> consumer = new RMQTestSource(deserializationSchema);
	AbstractStreamOperatorTestHarness<String> testHarness = new AbstractStreamOperatorTestHarness<>(
		new StreamSource<>(consumer), 1, 1, 0
	);

	testHarness.open();
	assertThat("Open method was not called", deserializationSchema.isOpenCalled(), is(true));
}
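MockDeserializationSchema is not shown on this page. A sketch of a schema that merely records whether open() was propagated might look like the following (illustrative only, not the actual Flink test class):

private static class OpenTrackingSchema implements DeserializationSchema<String> {

	private boolean openCalled;

	@Override
	public void open(InitializationContext context) {
		openCalled = true; // the harness's open() should eventually land here
	}

	@Override
	public String deserialize(byte[] message) {
		return new String(message, StandardCharsets.UTF_8);
	}

	@Override
	public boolean isEndOfStream(String nextElement) {
		return false;
	}

	@Override
	public TypeInformation<String> getProducedType() {
		return Types.STRING;
	}

	public boolean isOpenCalled() {
		return openCalled;
	}
}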
 
Example 11
Source Project: flink   Source File: ListCheckpointedTest.java    License: Apache License 2.0
private static AbstractStreamOperatorTestHarness<Integer> createTestHarness(TestUserFunction userFunction) throws Exception {
	return new AbstractStreamOperatorTestHarness<>(
		new StreamMap<>(userFunction),
		1,
		1,
		0);
}
 
Example 12
Source Project: flink   Source File: CoBroadcastWithKeyedOperatorTest.java    License: Apache License 2.0
private static OperatorSubtaskState repartitionInitState(
	final OperatorSubtaskState initState,
	final int numKeyGroups,
	final int oldParallelism,
	final int newParallelism,
	final int subtaskIndex
) {
	return AbstractStreamOperatorTestHarness.repartitionOperatorState(initState, numKeyGroups, oldParallelism, newParallelism, subtaskIndex);
}
 
Example 13
Source Project: flink   Source File: RMQSinkTest.java    License: Apache License 2.0
@Test
public void testOpen() throws Exception {
	MockSerializationSchema<String> serializationSchema = new MockSerializationSchema<>();

	RMQSink<String> producer = new RMQSink<>(rmqConnectionConfig, serializationSchema, publishOptions);
	AbstractStreamOperatorTestHarness<Object> testHarness = new AbstractStreamOperatorTestHarness<>(
		new StreamSink<>(producer), 1, 1, 0
	);

	testHarness.open();
	assertThat("Open method was not called", serializationSchema.isOpenCalled(), is(true));
}
 
Example 14
Source Project: flink   Source File: FlinkKinesisProducerTest.java    License: Apache License 2.0
@Test
public void testOpen() throws Exception {
	MockSerializationSchema<Object> serializationSchema = new MockSerializationSchema<>();

	Properties config = TestUtils.getStandardProperties();
	FlinkKinesisProducer<Object> producer = new FlinkKinesisProducer<>(
		serializationSchema,
		config);
	AbstractStreamOperatorTestHarness<Object> testHarness = new AbstractStreamOperatorTestHarness<>(
		new StreamSink<>(producer), 1, 1, 0
	);

	testHarness.open();
	assertThat("Open method was not called", serializationSchema.isOpenCalled(), is(true));
}
 
Example 15
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {

	final OneShotLatch latch = new OneShotLatch();
	final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

	doAnswer(new Answer<Void>() {
		@Override
		public Void answer(InvocationOnMock invocation) throws Throwable {
			latch.trigger();
			return null;
		}
	}).when(fetcher).runFetchLoop();

	when(fetcher.snapshotCurrentState()).thenReturn(state);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	testHarness.open();

	final Throwable[] error = new Throwable[1];

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				consumerFunction.run(new DummySourceContext() {
					@Override
					public void collect(String element) {
					}
				});
			} catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	if (!latch.isTriggered()) {
		latch.await();
	}

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(snapshot, path);

	consumerOperator.close();
	runner.join();
}
 
Example 16
@Test
public void testRestoreWithEmptyState() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// although the restored state is empty, the fetcher should still have registered the initially discovered shard;
	// furthermore, the discovered shard should be treated as a shard created while the job was not running,
	// and therefore should be consumed from the earliest sequence number
	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 17
@Test
public void testRestore() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotNull(consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));
	assertEquals(1, fetcher.getSubscribedShardsState().size());
	assertEquals(TEST_SEQUENCE_NUMBER, fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum());

	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 18
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// set up the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));

		// set up the new shards
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));

		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);

		newSplitShard1.setParentShardId(TEST_SHARD_ID);

		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));

		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);

		newSplitShard2.setParentShardId(TEST_SHARD_ID);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotNull(consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());

	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 19
@SuppressWarnings("unchecked")
private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception {
	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		new Properties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		state,
		null);

	final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	testHarness.open();

	final AtomicReference<Throwable> error = new AtomicReference<>();

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				consumer.run(new TestSourceContext<>());
			} catch (Throwable t) {
				t.printStackTrace();
				error.set(t);
			}
		}
	};
	runner.start();

	fetcher.waitUntilRun();

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(snapshot, path);

	consumerOperator.close();
	runner.join();
}
 
Example 20
Source Project: Flink-CEPplus   Source File: RollingSinkITCase.java    License: Apache License 2.0
@Test
public void testScalingDown() throws Exception {
	final File outDir = tempFolder.newFolder();

	OneInputStreamOperatorTestHarness<String, Object> testHarness1 = createRescalingTestSink(outDir, 3, 0);
	testHarness1.setup();
	testHarness1.open();

	OneInputStreamOperatorTestHarness<String, Object> testHarness2 = createRescalingTestSink(outDir, 3, 1);
	testHarness2.setup();
	testHarness2.open();

	OneInputStreamOperatorTestHarness<String, Object> testHarness3 = createRescalingTestSink(outDir, 3, 2);
	testHarness3.setup();
	testHarness3.open();

	testHarness1.processElement(new StreamRecord<>("test1", 0L));
	checkLocalFs(outDir, 1, 0, 0, 0);

	testHarness2.processElement(new StreamRecord<>("test2", 0L));
	testHarness2.processElement(new StreamRecord<>("test3", 0L));
	testHarness2.processElement(new StreamRecord<>("test4", 0L));
	testHarness2.processElement(new StreamRecord<>("test5", 0L));
	testHarness2.processElement(new StreamRecord<>("test6", 0L));
	checkLocalFs(outDir, 2, 4, 0, 0);

	testHarness3.processElement(new StreamRecord<>("test7", 0L));
	testHarness3.processElement(new StreamRecord<>("test8", 0L));
	checkLocalFs(outDir, 3, 5, 0, 0);

	// we intentionally snapshot them in non-ascending order so that the states are shuffled
	OperatorSubtaskState mergedSnapshot = AbstractStreamOperatorTestHarness.repackageState(
		testHarness3.snapshot(0, 0),
		testHarness1.snapshot(0, 0),
		testHarness2.snapshot(0, 0)
	);

	// with the above state reshuffling, we expect testHarness4 to take the
	// state of the previous testHarness3 and testHarness1 while testHarness5
	// will take that of the previous testHarness2

	OperatorSubtaskState initState1 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 3, 2, 0);

	OperatorSubtaskState initState2 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 3, 2, 1);

	OneInputStreamOperatorTestHarness<String, Object> testHarness4 = createRescalingTestSink(outDir, 2, 0);
	testHarness4.setup();
	testHarness4.initializeState(initState1);
	testHarness4.open();

	// we do not have a length file for part-2-0 because bucket part-2-0
	// was not "in-progress", but "pending" (its full content is valid).
	checkLocalFs(outDir, 1, 4, 3, 2);

	OneInputStreamOperatorTestHarness<String, Object> testHarness5 = createRescalingTestSink(outDir, 2, 1);
	testHarness5.setup();
	testHarness5.initializeState(initState2);
	testHarness5.open();

	checkLocalFs(outDir, 0, 0, 8, 3);
}
 
Example 21
Source Project: Flink-CEPplus   Source File: RollingSinkITCase.java    License: Apache License 2.0
@Test
public void testScalingUp() throws Exception {
	final File outDir = tempFolder.newFolder();

	OneInputStreamOperatorTestHarness<String, Object> testHarness1 = createRescalingTestSink(outDir, 2, 0);
	testHarness1.setup();
	testHarness1.open();

	OneInputStreamOperatorTestHarness<String, Object> testHarness2 = createRescalingTestSink(outDir, 2, 0);
	testHarness2.setup();
	testHarness2.open();

	testHarness1.processElement(new StreamRecord<>("test1", 0L));
	testHarness1.processElement(new StreamRecord<>("test2", 0L));

	checkLocalFs(outDir, 1, 1, 0, 0);

	testHarness2.processElement(new StreamRecord<>("test3", 0L));
	testHarness2.processElement(new StreamRecord<>("test4", 0L));
	testHarness2.processElement(new StreamRecord<>("test5", 0L));

	checkLocalFs(outDir, 2, 3, 0, 0);

	// we intentionally snapshot them in reverse order so that the states are shuffled
	OperatorSubtaskState mergedSnapshot = AbstractStreamOperatorTestHarness.repackageState(
		testHarness2.snapshot(0, 0),
		testHarness1.snapshot(0, 0)
	);

	OperatorSubtaskState initState1 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 2, 3, 0);

	OperatorSubtaskState initState2 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 2, 3, 1);

	OperatorSubtaskState initState3 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 2, 3, 2);

	testHarness1 = createRescalingTestSink(outDir, 3, 0);
	testHarness1.setup();
	testHarness1.initializeState(initState1);
	testHarness1.open();

	checkLocalFs(outDir, 1, 1, 3, 1);

	testHarness2 = createRescalingTestSink(outDir, 3, 1);
	testHarness2.setup();
	testHarness2.initializeState(initState2);
	testHarness2.open();

	checkLocalFs(outDir, 0, 0, 5, 2);

	OneInputStreamOperatorTestHarness<String, Object> testHarness3 = createRescalingTestSink(outDir, 3, 2);
	testHarness3.setup();
	testHarness3.initializeState(initState3);
	testHarness3.open();

	checkLocalFs(outDir, 0, 0, 5, 2);

	testHarness1.processElement(new StreamRecord<>("test6", 0));
	testHarness2.processElement(new StreamRecord<>("test6", 0));
	testHarness3.processElement(new StreamRecord<>("test6", 0));

	// 3 for the different tasks
	checkLocalFs(outDir, 3, 0, 5, 2);

	testHarness1.snapshot(1, 0);
	testHarness2.snapshot(1, 0);
	testHarness3.snapshot(1, 0);

	testHarness1.close();
	testHarness2.close();
	testHarness3.close();

	checkLocalFs(outDir, 0, 3, 5, 2);
}