Java Code Examples for org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness#setup()

The following examples show how to use org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness#setup(). They are extracted from open source projects; the source file, originating project, and license are noted above each example.
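All of the examples below follow the same harness lifecycle: setup() instantiates the operator's serializers and mock runtime environment, an optional initializeState(...) restores a previously captured OperatorSubtaskState (it must be called before open()), open() opens the operator, and close() tears it down. The following minimal sketch shows that sequence in isolation; it is not taken from any of the projects below, and the doubling StreamMap operator and the concrete values are illustrative assumptions.

import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;

public class HarnessLifecycleSketch {

	public static void main(String[] args) throws Exception {
		// a trivial doubling operator stands in for the operator under test
		StreamMap<Integer, Integer> operator = new StreamMap<>((Integer x) -> x * 2);

		OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
				new OneInputStreamOperatorTestHarness<>(operator);

		testHarness.setup(); // wire up serializers and the mock environment
		testHarness.open();  // when restoring, call initializeState(snapshot) before open()

		testHarness.processElement(new StreamRecord<>(21, 0L)); // emits 42 at timestamp 0

		// take a snapshot (checkpoint id 0 at timestamp 1) that a fresh harness could restore
		OperatorSubtaskState snapshot = testHarness.snapshot(0L, 1L);

		System.out.println(testHarness.getOutput()); // the emitted records, e.g. [Record @ 0 : 42]

		testHarness.close();
	}
}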
Example 1
Source File: FlinkKafkaProducer011ITCase.java    From flink with Apache License 2.0
@Test
public void testRecoverCommittedTransaction() throws Exception {
	String topic = "flink-kafka-producer-recover-committed-transaction";

	OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic);

	testHarness.setup();
	testHarness.open(); // producerA - start transaction (txn) 0
	testHarness.processElement(42, 0); // producerA - write 42 in txn 0
	OperatorSubtaskState checkpoint0 = testHarness.snapshot(0, 1); // producerA - pre commit txn 0, producerB - start txn 1
	testHarness.processElement(43, 2); // producerB - write 43 in txn 1
	testHarness.notifyOfCompletedCheckpoint(0); // producerA - commit txn 0 and return to the pool
	testHarness.snapshot(1, 3); // producerB - pre commit txn 1, producerA - start txn 2
	testHarness.processElement(44, 4); // producerA - write 44 in txn 2
	testHarness.close(); // producerA - abort txn 2

	testHarness = createTestHarness(topic);
	testHarness.initializeState(checkpoint0); // recover state 0 - producerA recover and commit txn 0
	testHarness.close();

	assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(42));

	deleteTestTopic(topic);
	checkProducerLeak();
}
 
Example 2
Source File: KeyedProcessOperatorTest.java    From flink with Apache License 2.0
/**
 * This also verifies that the timestamps of side-emitted records are correct.
 */
@Test
public void testSideOutput() throws Exception {
	KeyedProcessOperator<Integer, Integer, String> operator = new KeyedProcessOperator<>(new SideOutputProcessFunction());

	OneInputStreamOperatorTestHarness<Integer, String> testHarness =
		new KeyedOneInputStreamOperatorTestHarness<>(
			operator, new IdentityKeySelector<>(), BasicTypeInfo.INT_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(42, 17L /* timestamp */));

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	expectedOutput.add(new StreamRecord<>("IN:42", 17L /* timestamp */));

	TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

	ConcurrentLinkedQueue<StreamRecord<Integer>> expectedIntSideOutput = new ConcurrentLinkedQueue<>();
	expectedIntSideOutput.add(new StreamRecord<>(42, 17L /* timestamp */));
	ConcurrentLinkedQueue<StreamRecord<Integer>> intSideOutput =
		testHarness.getSideOutput(SideOutputProcessFunction.INTEGER_OUTPUT_TAG);
	TestHarnessUtil.assertOutputEquals(
		"Side output was not correct.",
		expectedIntSideOutput,
		intSideOutput);

	ConcurrentLinkedQueue<StreamRecord<Long>> expectedLongSideOutput = new ConcurrentLinkedQueue<>();
	expectedLongSideOutput.add(new StreamRecord<>(42L, 17L /* timestamp */));
	ConcurrentLinkedQueue<StreamRecord<Long>> longSideOutput =
		testHarness.getSideOutput(SideOutputProcessFunction.LONG_OUTPUT_TAG);
	TestHarnessUtil.assertOutputEquals(
		"Side output was not correct.",
		expectedLongSideOutput,
		longSideOutput);

	testHarness.close();
}
 
Example 3
Source File: BucketingSinkMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestore() throws Exception {
	final File outDir = tempFolder.newFolder();

	ValidatingBucketingSink<String> sink = (ValidatingBucketingSink<String>)
			new ValidatingBucketingSink<String>(outDir.getAbsolutePath(), expectedBucketFilesPrefix)
		.setWriter(new StringWriter<String>())
		.setBatchSize(5)
		.setPartPrefix(PART_PREFIX)
		.setInProgressPrefix("")
		.setPendingPrefix("")
		.setValidLengthPrefix("")
		.setInProgressSuffix(IN_PROGRESS_SUFFIX)
		.setPendingSuffix(PENDING_SUFFIX)
		.setValidLengthSuffix(VALID_LENGTH_SUFFIX)
		.setUseTruncate(false); // don't use truncate because files do not exist

	OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(
		new StreamSink<>(sink), 10, 1, 0);
	testHarness.setup();

	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"bucketing-sink-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	assertTrue(sink.initializeCalled);

	testHarness.processElement(new StreamRecord<>("test1", 0L));
	testHarness.processElement(new StreamRecord<>("test2", 0L));

	checkLocalFs(outDir, 1, 1, 0, 0);

	testHarness.close();
}
 
Example 4
Source File: KeyedProcessOperatorTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Verifies that we don't have leakage between different keys.
 */
@Test
public void testEventTimeTimerWithState() throws Exception {

	KeyedProcessOperator<Integer, Integer, String> operator =
			new KeyedProcessOperator<>(new TriggeringStatefulFlatMapFunction(TimeDomain.EVENT_TIME));

	OneInputStreamOperatorTestHarness<Integer, String> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new IdentityKeySelector<Integer>(), BasicTypeInfo.INT_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.processWatermark(new Watermark(1));
	testHarness.processElement(new StreamRecord<>(17, 0L)); // should set timer for 6
	testHarness.processElement(new StreamRecord<>(13, 0L)); // should set timer for 6

	testHarness.processWatermark(new Watermark(2));
	testHarness.processElement(new StreamRecord<>(42, 1L)); // should set timer for 7
	testHarness.processElement(new StreamRecord<>(13, 1L)); // should delete timer

	testHarness.processWatermark(new Watermark(6));
	testHarness.processWatermark(new Watermark(7));

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	expectedOutput.add(new Watermark(1L));
	expectedOutput.add(new StreamRecord<>("INPUT:17", 0L));
	expectedOutput.add(new StreamRecord<>("INPUT:13", 0L));
	expectedOutput.add(new Watermark(2L));
	expectedOutput.add(new StreamRecord<>("INPUT:42", 1L));
	expectedOutput.add(new StreamRecord<>("STATE:17", 6L));
	expectedOutput.add(new Watermark(6L));
	expectedOutput.add(new StreamRecord<>("STATE:42", 7L));
	expectedOutput.add(new Watermark(7L));

	TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.close();
}
 
Example 5
Source File: LegacyKeyedProcessOperatorTest.java    From flink with Apache License 2.0
/**
 * Verifies that we don't have leakage between different keys.
 */
@Test
public void testEventTimeTimerWithState() throws Exception {

	LegacyKeyedProcessOperator<Integer, Integer, String> operator =
			new LegacyKeyedProcessOperator<>(new TriggeringStatefulFlatMapFunction(TimeDomain.EVENT_TIME));

	OneInputStreamOperatorTestHarness<Integer, String> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new IdentityKeySelector<Integer>(), BasicTypeInfo.INT_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.processWatermark(new Watermark(1));
	testHarness.processElement(new StreamRecord<>(17, 0L)); // should set timer for 6

	testHarness.processWatermark(new Watermark(2));
	testHarness.processElement(new StreamRecord<>(42, 1L)); // should set timer for 7

	testHarness.processWatermark(new Watermark(6));
	testHarness.processWatermark(new Watermark(7));

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	expectedOutput.add(new Watermark(1L));
	expectedOutput.add(new StreamRecord<>("INPUT:17", 0L));
	expectedOutput.add(new Watermark(2L));
	expectedOutput.add(new StreamRecord<>("INPUT:42", 1L));
	expectedOutput.add(new StreamRecord<>("STATE:17", 6L));
	expectedOutput.add(new Watermark(6L));
	expectedOutput.add(new StreamRecord<>("STATE:42", 7L));
	expectedOutput.add(new Watermark(7L));

	TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.close();
}
 
Example 6
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeReducingProcessingTimeWindowsSnapshot() throws Exception {
	final int windowSize = 3;

	ReducingStateDescriptor<Tuple2<String, Integer>> stateDesc = new ReducingStateDescriptor<>("window-contents",
			new SumReducer<>(),
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>, TimeWindow> operator = new WindowOperator<>(
			TumblingProcessingTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<String, TimeWindow, Tuple2<String, Integer>>()),
			ProcessingTimeTrigger.create(),
			0,
			null /* late data output tag */);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.setProcessingTime(10);
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1)));

	testHarness.setProcessingTime(3010);
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key3", 1)));

	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 1), 2999));
	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 1), 2999));

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-reduce-processing-time-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();

}
 
Example 7
Source File: BulkWriterTest.java    From flink with Apache License 2.0
private void testPartFiles(
		OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Object> testHarness,
		File outDir,
		String partFileName1,
		String partFileName2) throws Exception {

	testHarness.setup();
	testHarness.open();

	// this creates a new bucket "test1" and part-0-0
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 1), 1L));
	TestUtils.checkLocalFs(outDir, 1, 0);

	// we take a checkpoint so we roll.
	testHarness.snapshot(1L, 1L);

	// these will close part-0-0 and open part-0-1
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 2), 2L));
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 3), 3L));

	// we take a checkpoint so we roll again.
	testHarness.snapshot(2L, 2L);

	TestUtils.checkLocalFs(outDir, 2, 0);

	Map<File, String> contents = TestUtils.getFileContentByPath(outDir);
	int fileCounter = 0;
	for (Map.Entry<File, String> fileContents : contents.entrySet()) {
		if (fileContents.getKey().getName().contains(partFileName1)) {
			fileCounter++;
			Assert.assertEquals("test1@1\n", fileContents.getValue());
		} else if (fileContents.getKey().getName().contains(partFileName2)) {
			fileCounter++;
			Assert.assertEquals("test1@2\ntest1@3\n", fileContents.getValue());
		}
	}
	Assert.assertEquals(2L, fileCounter);

	// we acknowledge the latest checkpoint, so everything should be published.
	testHarness.notifyOfCompletedCheckpoint(2L);

	TestUtils.checkLocalFs(outDir, 0, 2);
}
 
Example 8
Source File: ContinuousFileProcessingRescalingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testReaderScalingUp() throws Exception {
	// simulates the scenario of scaling up from 1 to 2 instances

	final OneShotLatch waitingLatch1 = new OneShotLatch();
	final OneShotLatch triggerLatch1 = new OneShotLatch();

	BlockingFileInputFormat format1 = new BlockingFileInputFormat(
		triggerLatch1, waitingLatch1, new Path("test"), 20, 5);
	FileInputSplit[] splits = format1.createInputSplits(2);

	OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness1 = getTestHarness(format1, 1, 0);
	testHarness1.open();

	testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(0, splits[0])));
	testHarness1.processElement(new StreamRecord<>(getTimestampedSplit(1, splits[1])));

	// wait until it arrives at element 5
	if (!triggerLatch1.isTriggered()) {
		triggerLatch1.await();
	}

	OperatorSubtaskState snapshot = testHarness1.snapshot(0, 0);

	// this will be the init state for new instance-0
	OperatorSubtaskState initState1 =
		AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, maxParallelism, 1, 2, 0);

	// this will be the init state for new instance-1
	OperatorSubtaskState initState2 =
		AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, maxParallelism, 1, 2, 1);

	// 1) clear the output of the first instance so that we can compare it with the output of the new instances, and
	// 2) let the operator process the rest of its state
	testHarness1.getOutput().clear();
	waitingLatch1.trigger();

	// create the second instance and let it process the second split till element 15
	final OneShotLatch triggerLatch2 = new OneShotLatch();
	final OneShotLatch waitingLatch2 = new OneShotLatch();

	BlockingFileInputFormat format2 = new BlockingFileInputFormat(
		triggerLatch2, waitingLatch2, new Path("test"), 20, 15);

	OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness2 = getTestHarness(format2, 2, 0);
	testHarness2.setup();
	testHarness2.initializeState(initState1);
	testHarness2.open();

	BlockingFileInputFormat format3 = new BlockingFileInputFormat(
		triggerLatch2, waitingLatch2, new Path("test"), 20, 15);

	OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, String> testHarness3 = getTestHarness(format3, 2, 1);
	testHarness3.setup();
	testHarness3.initializeState(initState2);
	testHarness3.open();

	triggerLatch2.trigger();
	waitingLatch2.trigger();

	// and wait for the processing to finish
	synchronized (testHarness1.getCheckpointLock()) {
		testHarness1.close();
	}
	synchronized (testHarness2.getCheckpointLock()) {
		testHarness2.close();
	}
	synchronized (testHarness3.getCheckpointLock()) {
		testHarness3.close();
	}

	Queue<Object> expectedResult = new ArrayDeque<>();
	putElementsInQ(expectedResult, testHarness1.getOutput());

	Queue<Object> actualResult = new ArrayDeque<>();
	putElementsInQ(actualResult, testHarness2.getOutput());
	putElementsInQ(actualResult, testHarness3.getOutput());

	Assert.assertEquals(35, actualResult.size());
	Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());
}
 
Example 9
Source File: WindowOperatorTest.java    From flink with Apache License 2.0
@Test
public void testSlidingCountWindow() throws Exception {
	closeCalled.set(0);
	final int windowSize = 5;
	final int windowSlide = 3;
	LogicalType[] windowTypes = new LogicalType[] { new BigIntType() };

	WindowOperator operator = WindowOperatorBuilder
			.builder()
			.withInputFields(inputFieldTypes)
			.countWindow(windowSize, windowSlide)
			.aggregateAndBuild(getCountWindowAggFunction(), equaliser, accTypes, aggResultTypes, windowTypes);

	OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(operator);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	testHarness.open();

	testHarness.processElement(insertRecord("key2", 1, 0L));
	testHarness.processElement(insertRecord("key2", 2, 1000L));
	testHarness.processElement(insertRecord("key2", 3, 2500L));
	testHarness.processElement(insertRecord("key2", 4, 2500L));
	testHarness.processElement(insertRecord("key2", 5, 2500L));
	testHarness.processElement(insertRecord("key1", 1, 10L));
	testHarness.processElement(insertRecord("key1", 2, 1000L));

	testHarness.processWatermark(new Watermark(12000));
	testHarness.setProcessingTime(12000L);
	expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 15L, 5L, 0L)));
	expectedOutput.add(new Watermark(12000));
	assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());

	// do a snapshot, close and restore again
	OperatorSubtaskState snapshotV2 = testHarness.snapshot(0L, 0);
	testHarness.close();
	expectedOutput.clear();

	testHarness = createTestHarness(operator);
	testHarness.setup();
	testHarness.initializeState(snapshotV2);
	testHarness.open();

	testHarness.processElement(insertRecord("key1", 3, 2500L));
	testHarness.processElement(insertRecord("key1", 4, 2500L));
	testHarness.processElement(insertRecord("key1", 5, 2500L));
	expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 15L, 5L, 0L)));
	assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.processElement(insertRecord("key2", 6, 6000L));
	testHarness.processElement(insertRecord("key2", 7, 6000L));
	testHarness.processElement(insertRecord("key2", 8, 6050L));
	testHarness.processElement(insertRecord("key2", 9, 6050L));
	expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 30L, 5L, 1L)));
	assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.processElement(insertRecord("key1", 6, 4000L));
	testHarness.processElement(insertRecord("key1", 7, 4000L));
	testHarness.processElement(insertRecord("key1", 8, 4000L));
	testHarness.processElement(insertRecord("key2", 10, 15000L));
	testHarness.processElement(insertRecord("key2", 11, 15000L));
	expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 30L, 5L, 1L)));
	expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 45L, 5L, 2L)));
	assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.close();

	// we close once mid-test (before the restore) and once at the end, so close must have been called twice
	assertEquals("Close was not called.", 2, closeCalled.get());
}
 
Example 10
Source File: WindowOperatorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testSessionWindows() throws Exception {
	closeCalled.set(0);

	final int sessionSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			EventTimeTrigger.create(),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			createTestHarness(operator);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));

	// do a snapshot, close and restore again
	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
	testHarness.close();

	testHarness = createTestHarness(operator);
	testHarness.setup();
	testHarness.initializeState(snapshot);
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), 2500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 5501));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 6), 6050));

	testHarness.processWatermark(new Watermark(12000));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-6", 10L, 5500L), 5499));
	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-6", 0L, 5500L), 5499));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-20", 5501L, 9050L), 9049));
	expectedOutput.add(new Watermark(12000));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 10), 15000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 20), 15000));

	testHarness.processWatermark(new Watermark(17999));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-30", 15000L, 18000L), 17999));
	expectedOutput.add(new Watermark(17999));

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());

	testHarness.close();
}
 
Example 11
Source File: BucketingSinkTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testSameParallelismWithShufflingStates() throws Exception {
	final File outDir = tempFolder.newFolder();

	OneInputStreamOperatorTestHarness<String, Object> testHarness1 = createRescalingTestSink(outDir, 2, 0, 100);
	testHarness1.setup();
	testHarness1.open();

	OneInputStreamOperatorTestHarness<String, Object> testHarness2 = createRescalingTestSink(outDir, 2, 1, 100);
	testHarness2.setup();
	testHarness2.open();

	testHarness1.processElement(new StreamRecord<>("test1", 0L));
	checkLocalFs(outDir, 1, 0, 0, 0);

	testHarness2.processElement(new StreamRecord<>("test2", 0L));
	checkLocalFs(outDir, 2, 0, 0, 0);

	// intentionally we snapshot them in the reverse order so that the states are shuffled
	OperatorSubtaskState mergedSnapshot = AbstractStreamOperatorTestHarness.repackageState(
		testHarness2.snapshot(0, 0),
		testHarness1.snapshot(0, 0)
	);

	checkLocalFs(outDir, 2, 0, 0, 0);

	// this will not be included in any checkpoint, so it could be cleaned up (although we do not do so here)
	testHarness2.processElement(new StreamRecord<>("test3", 0L));
	checkLocalFs(outDir, 3, 0, 0, 0);

	OperatorSubtaskState initState1 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 2, 2, 0);

	testHarness1 = createRescalingTestSink(outDir, 2, 0, 100);
	testHarness1.setup();
	testHarness1.initializeState(initState1);
	testHarness1.open();

	// one of the in-progress files is the one assigned to the next instance,
	// the other is test3, which is simply not cleaned up
	checkLocalFs(outDir, 2, 0, 1, 1);

	OperatorSubtaskState initState2 = AbstractStreamOperatorTestHarness.repartitionOperatorState(
		mergedSnapshot, maxParallelism, 2, 2, 1);

	testHarness2 = createRescalingTestSink(outDir, 2, 1, 100);
	testHarness2.setup();
	testHarness2.initializeState(initState2);
	testHarness2.open();

	checkLocalFs(outDir, 1, 0, 2, 2);

	testHarness1.close();
	testHarness2.close();

	// the 1 in-progress can be discarded.
	checkLocalFs(outDir, 1, 0, 2, 2);
}
 
Example 12
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeWindowsWithKryoSerializedKeysSnapshot() throws Exception {
	final int windowSize = 3;

	TypeInformation<Tuple2<NonPojoType, Integer>> inputType = new TypeHint<Tuple2<NonPojoType, Integer>>() {}.getTypeInfo();

	ReducingStateDescriptor<Tuple2<NonPojoType, Integer>> stateDesc = new ReducingStateDescriptor<>("window-contents",
		new SumReducer<>(),
		inputType.createSerializer(new ExecutionConfig()));

	TypeSerializer<NonPojoType> keySerializer = TypeInformation.of(NonPojoType.class).createSerializer(new ExecutionConfig());
	assertTrue(keySerializer instanceof KryoSerializer);

	WindowOperator<NonPojoType, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, TimeWindow> operator = new WindowOperator<>(
		TumblingEventTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
		new TimeWindow.Serializer(),
		new TupleKeySelector<>(),
		keySerializer,
		stateDesc,
		new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<>()),
		EventTimeTrigger.create(),
		0,
		null /* late data output tag */);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	OneInputStreamOperatorTestHarness<Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>> testHarness =
		new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), TypeInformation.of(NonPojoType.class));

	testHarness.setup();
	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3999));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3000));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 20));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 999));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1998));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1999));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1000));

	testHarness.processWatermark(new Watermark(999));
	expectedOutput.add(new Watermark(999));
	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	testHarness.processWatermark(new Watermark(1999));
	expectedOutput.add(new Watermark(1999));
	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-kryo-serialized-key-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example 13
Source File: CEPOperatorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testKeyedCEPOperatorCheckpointing() throws Exception {

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness = getCepTestHarness(false);

	try {
		harness.open();

		Event startEvent = new Event(42, "start", 1.0);
		SubEvent middleEvent = new SubEvent(42, "foo", 1.0, 10.0);
		Event endEvent = new Event(42, "end", 1.0);

		harness.processElement(new StreamRecord<>(startEvent, 1L));
		harness.processElement(new StreamRecord<>(new Event(42, "foobar", 1.0), 2L));

		// simulate snapshot/restore with some elements in internal sorting queue
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		harness.close();

		harness = getCepTestHarness(false);

		harness.setup();
		harness.initializeState(snapshot);
		harness.open();

		harness.processWatermark(new Watermark(Long.MIN_VALUE));

		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3L));

		// if element timestamps are not correctly checkpointed/restored this will lead to
		// a pruning time underflow exception in NFA
		harness.processWatermark(new Watermark(2L));

		harness.processElement(new StreamRecord<Event>(middleEvent, 3L));
		harness.processElement(new StreamRecord<>(new Event(42, "start", 1.0), 4L));
		harness.processElement(new StreamRecord<>(endEvent, 5L));

		// simulate snapshot/restore with empty element queue but NFA state
		OperatorSubtaskState snapshot2 = harness.snapshot(1L, 1L);
		harness.close();

		harness = getCepTestHarness(false);

		harness.setup();
		harness.initializeState(snapshot2);
		harness.open();

		harness.processWatermark(new Watermark(Long.MAX_VALUE));

		// get and verify the output

		Queue<Object> result = harness.getOutput();

		assertEquals(2, result.size());

		verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
		verifyWatermark(result.poll(), Long.MAX_VALUE);
	} finally {
		harness.close();
	}
}
 
Example 14
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeStartingNewPatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);
	final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
				getKeyedCepOpearator(false, new NFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processElement(new StreamRecord<Event>(startEvent1, 1));
		harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-starting-new-pattern-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example 15
Source File: WindowOperatorTest.java    From flink with Apache License 2.0
private void testSlidingEventTimeWindows(OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator) throws Exception {

		OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
			createTestHarness(operator);

		testHarness.setup();
		testHarness.open();

		ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

		// add elements out-of-order
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3999));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3000));

		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 20));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 0));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 999));

		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1998));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1999));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));

		testHarness.processWatermark(new Watermark(999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 999));
		expectedOutput.add(new Watermark(999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(1999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 1999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 3), 1999));
		expectedOutput.add(new Watermark(1999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(2999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 2999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 3), 2999));
		expectedOutput.add(new Watermark(2999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		// do a snapshot, close and restore again
		OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);
		testHarness.close();

		expectedOutput.clear();
		testHarness = createTestHarness(operator);
		testHarness.setup();
		testHarness.initializeState(snapshot);
		testHarness.open();

		testHarness.processWatermark(new Watermark(3999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 5), 3999));
		expectedOutput.add(new Watermark(3999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(4999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 2), 4999));
		expectedOutput.add(new Watermark(4999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(5999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 2), 5999));
		expectedOutput.add(new Watermark(5999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		// those don't have any effect...
		testHarness.processWatermark(new Watermark(6999));
		testHarness.processWatermark(new Watermark(7999));
		expectedOutput.add(new Watermark(6999));
		expectedOutput.add(new Watermark(7999));

		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.close();
	}
 
Example 16
Source File: FlinkKafkaProducer011ITCase.java    From Flink-CEPplus with Apache License 2.0
/**
 * This test checks whether FlinkKafkaProducer011 correctly aborts lingering transactions after a failure
 * that happened before the first checkpoint and was followed by a reduction of the parallelism.
 * If such transactions were left lingering, consumers would be unable to read committed records
 * that were created after the lingering transaction.
 */
@Test
public void testScaleDownBeforeFirstCheckpoint() throws Exception {
	String topic = "scale-down-before-first-checkpoint";

	List<AutoCloseable> operatorsToClose = new ArrayList<>();
	int preScaleDownParallelism = Math.max(2, FlinkKafkaProducer011.SAFE_SCALE_DOWN_FACTOR);
	for (int subtaskIndex = 0; subtaskIndex < preScaleDownParallelism; subtaskIndex++) {
		OneInputStreamOperatorTestHarness<Integer, Object> preScaleDownOperator = createTestHarness(
			topic,
			preScaleDownParallelism,
			preScaleDownParallelism,
			subtaskIndex,
			EXACTLY_ONCE);

		preScaleDownOperator.setup();
		preScaleDownOperator.open();
		preScaleDownOperator.processElement(subtaskIndex * 2, 0);
		preScaleDownOperator.snapshot(0, 1);
		preScaleDownOperator.processElement(subtaskIndex * 2 + 1, 2);

		operatorsToClose.add(preScaleDownOperator);
	}

	// do not close the previous test harnesses, to make sure that closing does not clean anything up (in case of
	// a failure there might not be any close at all)

	// After previous failure simulate restarting application with smaller parallelism
	OneInputStreamOperatorTestHarness<Integer, Object> postScaleDownOperator1 = createTestHarness(topic, 1, 1, 0, EXACTLY_ONCE);

	postScaleDownOperator1.setup();
	postScaleDownOperator1.open();

	// write and commit more records, after potentially lingering transactions
	postScaleDownOperator1.processElement(46, 7);
	postScaleDownOperator1.snapshot(4, 8);
	postScaleDownOperator1.processElement(47, 9);
	postScaleDownOperator1.notifyOfCompletedCheckpoint(4);

	// now we should have:
	// - records 42, 43, 44 and 45 in aborted transactions
	// - committed transaction with record 46
	// - pending transaction with record 47
	assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(46));

	postScaleDownOperator1.close();
	// ignore ProducerFencedExceptions, because postScaleDownOperator1 could reuse transactional ids.
	for (AutoCloseable operatorToClose : operatorsToClose) {
		closeIgnoringProducerFenced(operatorToClose);
	}
	deleteTestTopic(topic);
	checkProducerLeak();
}
 
Example 17
Source File: WindowOperatorTest.java    From flink with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testSessionWindowsWithProcessFunction() throws Exception {
	closeCalled.set(0);

	final int sessionSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableProcessWindowFunction<>(new SessionProcessWindowFunction()),
			EventTimeTrigger.create(),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			createTestHarness(operator);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));

	// do a snapshot, close and restore again
	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());
	testHarness.close();

	testHarness = createTestHarness(operator);
	testHarness.setup();
	testHarness.initializeState(snapshot);
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 3), 2500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 5501));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 5), 6000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 6), 6050));

	testHarness.processWatermark(new Watermark(12000));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key1-6", 10L, 5500L), 5499));
	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-6", 0L, 5500L), 5499));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-20", 5501L, 9050L), 9049));
	expectedOutput.add(new Watermark(12000));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 10), 15000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 20), 15000));

	testHarness.processWatermark(new Watermark(17999));

	expectedOutput.add(new StreamRecord<>(new Tuple3<>("key2-30", 15000L, 18000L), 17999));
	expectedOutput.add(new Watermark(17999));

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple3ResultSortComparator());

	testHarness.close();
}
 
Example 18
Source File: WrappingFunctionSnapshotRestoreTest.java    From flink with Apache License 2.0
@Test
public void testSnapshotAndRestoreWrappedCheckpointedFunction() throws Exception {

	StreamMap<Integer, Integer> operator = new StreamMap<>(
			new WrappingTestFun(new WrappingTestFun(new InnerTestFun())));

	OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
			new OneInputStreamOperatorTestHarness<>(operator);

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(5, 12L));

	// snapshot and restore from scratch
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);

	testHarness.close();

	InnerTestFun innerTestFun = new InnerTestFun();
	operator = new StreamMap<>(new WrappingTestFun(new WrappingTestFun(innerTestFun)));

	testHarness = new OneInputStreamOperatorTestHarness<>(operator);

	testHarness.setup();
	testHarness.initializeState(snapshot);
	testHarness.open();

	Assert.assertTrue(innerTestFun.wasRestored);
	testHarness.close();
}
 
Example 19
Source File: LegacyKeyedProcessOperatorTest.java    From flink with Apache License 2.0
@Test
public void testSnapshotAndRestore() throws Exception {

	LegacyKeyedProcessOperator<Integer, Integer, String> operator =
			new LegacyKeyedProcessOperator<>(new BothTriggeringFlatMapFunction());

	OneInputStreamOperatorTestHarness<Integer, String> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new IdentityKeySelector<Integer>(), BasicTypeInfo.INT_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(5, 12L));

	// snapshot and restore from scratch
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);

	testHarness.close();

	operator = new LegacyKeyedProcessOperator<>(new BothTriggeringFlatMapFunction());

	testHarness = new KeyedOneInputStreamOperatorTestHarness<>(operator, new IdentityKeySelector<Integer>(), BasicTypeInfo.INT_TYPE_INFO);

	testHarness.setup();
	testHarness.initializeState(snapshot);
	testHarness.open();

	testHarness.setProcessingTime(5);
	testHarness.processWatermark(new Watermark(6));

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	expectedOutput.add(new StreamRecord<>("PROC:1777"));
	expectedOutput.add(new StreamRecord<>("EVENT:1777", 6L));
	expectedOutput.add(new Watermark(6));

	TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

	testHarness.close();
}