Java Code Examples for org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness#snapshot()

The following examples show how to use org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness#snapshot() . They are extracted from open source projects; the source file, project, and license are noted above each example.
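Most of the examples below follow the same harness lifecycle: wrap the operator under test in a harness, call setup() and open(), process some elements, and call snapshot(checkpointId, timestamp) to capture an OperatorSubtaskState, which can then be restored into a fresh harness via initializeState() before open(). The sketch below only illustrates that cycle; the StreamMap operator and the element values are assumptions for illustration, not code taken from any of the examples.

// a minimal sketch of the snapshot/restore cycle, using a simple stateless StreamMap for illustration
StreamMap<Integer, Integer> operator = new StreamMap<>((MapFunction<Integer, Integer>) x -> x + 1);
OneInputStreamOperatorTestHarness<Integer, Integer> harness =
	new OneInputStreamOperatorTestHarness<>(operator);
harness.setup();
harness.open();

harness.processElement(new StreamRecord<>(1, 0L));

// snapshot(checkpointId, timestamp) captures the operator state at this point
OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
harness.close();

// a fresh harness restores the captured state via initializeState() before open()
harness = new OneInputStreamOperatorTestHarness<>(
	new StreamMap<>((MapFunction<Integer, Integer>) x -> x + 1));
harness.setup();
harness.initializeState(snapshot);
harness.open();
harness.close();

Several of the examples (e.g. the CEP and window-operator migration tests) use KeyedOneInputStreamOperatorTestHarness instead, which additionally takes a KeySelector and the key TypeInformation.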
Example 1
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/**
 * This test is meant to ensure that testAtLeastOnceSink is valid by verifying that, if flushing is disabled,
 * the snapshot method does indeed finish without waiting for pending requests;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testDoesNotWaitForPendingRequestsIfFlushingDisabled() throws Exception {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new DummyRetryFailureHandler());
	sink.disableFlushOnCheckpoint(); // disable flushing

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// set up the next bulk request with a pending item failure; it is never flushed, so the failure never surfaces
	sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
	testHarness.processElement(new StreamRecord<>("msg-1"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// the snapshot should not block even though we haven't flushed the bulk request
	testHarness.snapshot(1L, 1000L);

	testHarness.close();
}
 
Example 2
Source File: BucketingSinkTest.java    From flink with Apache License 2.0
@Test
public void testInactivityPeriodWithLateNotify() throws Exception {
	final File outDir = tempFolder.newFolder();

	OneInputStreamOperatorTestHarness<String, Object> testHarness = createRescalingTestSink(outDir, 1, 0, 100);
	testHarness.setup();
	testHarness.open();

	testHarness.setProcessingTime(0L);

	testHarness.processElement(new StreamRecord<>("test1", 1L));
	testHarness.processElement(new StreamRecord<>("test2", 1L));
	checkLocalFs(outDir, 2, 0, 0, 0);

	testHarness.setProcessingTime(101L);	// put some in pending
	checkLocalFs(outDir, 0, 2, 0, 0);

	testHarness.snapshot(0, 0);				// put them in pending for 0
	checkLocalFs(outDir, 0, 2, 0, 0);

	testHarness.processElement(new StreamRecord<>("test3", 1L));
	testHarness.processElement(new StreamRecord<>("test4", 1L));

	testHarness.setProcessingTime(202L);	// put some in pending

	testHarness.snapshot(1, 0);				// put them in pending for 1
	checkLocalFs(outDir, 0, 4, 0, 0);

	testHarness.notifyOfCompletedCheckpoint(0);	// put the pending for 0 to the "committed" state
	checkLocalFs(outDir, 0, 2, 2, 0);

	testHarness.notifyOfCompletedCheckpoint(1); // put the pending for 1 to the "committed" state
	checkLocalFs(outDir, 0, 0, 4, 0);
}
 
Example 3
Source File: FlinkKafkaProducerBaseTest.java    From flink with Apache License 2.0
/**
 * Test ensuring that if a snapshot call happens right after an async exception is caught, it should be rethrown.
 */
@Test
public void testAsyncErrorRethrownOnCheckpoint() throws Throwable {
	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	testHarness.processElement(new StreamRecord<>("msg-1"));

	// let the message request return an async exception
	producer.getPendingCallbacks().get(0).onCompletion(null, new Exception("artificial async exception"));

	try {
		testHarness.snapshot(123L, 123L);
	} catch (Exception e) {
		// the snapshot call should rethrow the async exception
		Assert.assertTrue(e.getCause().getMessage().contains("artificial async exception"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example 4
Source File: ElasticsearchSinkBaseTest.java    From Flink-CEPplus with Apache License 2.0
/** Tests that any bulk failure in the listener callbacks is rethrown on an immediately following checkpoint. */
@Test
public void testBulkFailureRethrownOnCheckpoint() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and let the whole bulk request fail
	sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));
	testHarness.processElement(new StreamRecord<>("msg"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	try {
		testHarness.snapshot(1L, 1000L);
	} catch (Exception e) {
		// the snapshot should have failed with the bulk request failure
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for bulk request"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example 5
Source File: BulkWriterTest.java    From flink with Apache License 2.0
private void testPartFilesWithIntegerBucketer(
		OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Object> testHarness,
		File outDir,
		String partFileName1,
		String partFileName2,
		String partFileName3) throws Exception {

	testHarness.setup();
	testHarness.open();

	// this creates the new (integer) bucket "1" and part-0-0
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 1), 1L));
	TestUtils.checkLocalFs(outDir, 1, 0);

	// we take a checkpoint so we roll.
	testHarness.snapshot(1L, 1L);

	// these will close part-0-0 and open part-0-1 and part-0-2
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 2), 2L));
	testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 3), 3L));

	// we take a checkpoint so we roll again.
	testHarness.snapshot(2L, 2L);

	TestUtils.checkLocalFs(outDir, 3, 0);

	Map<File, String> contents = TestUtils.getFileContentByPath(outDir);
	int fileCounter = 0;
	for (Map.Entry<File, String> fileContents : contents.entrySet()) {
		if (fileContents.getKey().getName().contains(partFileName1)) {
			fileCounter++;
			Assert.assertEquals("test1@1\n", fileContents.getValue());
			Assert.assertEquals("1", fileContents.getKey().getParentFile().getName());
		} else if (fileContents.getKey().getName().contains(partFileName2)) {
			fileCounter++;
			Assert.assertEquals("test1@2\n", fileContents.getValue());
			Assert.assertEquals("2", fileContents.getKey().getParentFile().getName());
		} else if (fileContents.getKey().getName().contains(partFileName3)) {
			fileCounter++;
			Assert.assertEquals("test1@3\n", fileContents.getValue());
			Assert.assertEquals("3", fileContents.getKey().getParentFile().getName());
		}
	}
	Assert.assertEquals(3L, fileCounter);

	// we acknowledge the latest checkpoint, so everything should be published.
	testHarness.notifyOfCompletedCheckpoint(2L);

	TestUtils.checkLocalFs(outDir, 0, 3);
}
 
Example 6
Source File: GenericWriteAheadSinkTest.java    From flink with Apache License 2.0
/**
 * Verifies that exceptions thrown by a committer do not fail the job; instead, notify() is
 * aborted and the affected checkpoints are retried later.
 */
@Test
public void testCommitterException() throws Exception {

	ListSink2 sink = new ListSink2();

	OneInputStreamOperatorTestHarness<Tuple1<Integer>, Tuple1<Integer>> testHarness =
			new OneInputStreamOperatorTestHarness<>(sink);

	testHarness.open();

	int elementCounter = 1;

	for (int x = 0; x < 10; x++) {
		testHarness.processElement(new StreamRecord<>(generateValue(elementCounter, 0)));
		elementCounter++;
	}

	testHarness.snapshot(0, 0);
	testHarness.notifyOfCompletedCheckpoint(0);

	// isCommitted should have failed, thus sendValues() should never have been called
	Assert.assertEquals(0, sink.values.size());

	for (int x = 0; x < 11; x++) {
		testHarness.processElement(new StreamRecord<>(generateValue(elementCounter, 1)));
		elementCounter++;
	}

	testHarness.snapshot(1, 0);
	testHarness.notifyOfCompletedCheckpoint(1);

	// previous CP should be retried, but will fail the CP commit. Second CP should be skipped.
	Assert.assertEquals(10, sink.values.size());

	for (int x = 0; x < 12; x++) {
		testHarness.processElement(new StreamRecord<>(generateValue(elementCounter, 2)));
		elementCounter++;
	}

	testHarness.snapshot(2, 0);
	testHarness.notifyOfCompletedCheckpoint(2);

	// all CPs should be retried and succeed; since one CP was written twice we have 2 * 10 + 11 + 12 = 43 values
	Assert.assertEquals(43, sink.values.size());
}
 
Example 7
Source File: CEPOperatorTest.java    From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorNFAUpdate() throws Exception {

	CepOperator<Event, Integer, Map<String, List<Event>>> operator = CepOperatorTestUtilities.getKeyedCepOpearator(
		true,
		new SimpleNFAFactory());
	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness = CepOperatorTestUtilities.getCepTestHarness(
		operator);

	try {
		harness.open();

		Event startEvent = new Event(42, "c", 1.0);
		SubEvent middleEvent = new SubEvent(42, "a", 1.0, 10.0);
		Event endEvent = new Event(42, "b", 1.0);

		harness.processElement(new StreamRecord<>(startEvent, 1L));

		// simulate snapshot/restore with some elements in internal sorting queue
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		harness.close();

		operator = CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
		harness = CepOperatorTestUtilities.getCepTestHarness(operator);

		harness.setup();
		harness.initializeState(snapshot);
		harness.open();

		harness.processElement(new StreamRecord<>(new Event(42, "d", 1.0), 4L));
		OperatorSubtaskState snapshot2 = harness.snapshot(0L, 0L);
		harness.close();

		operator = CepOperatorTestUtilities.getKeyedCepOpearator(true, new SimpleNFAFactory());
		harness = CepOperatorTestUtilities.getCepTestHarness(operator);

		harness.setup();
		harness.initializeState(snapshot2);
		harness.open();

		harness.processElement(new StreamRecord<Event>(middleEvent, 4L));
		harness.processElement(new StreamRecord<>(endEvent, 4L));

		// get and verify the output

		Queue<Object> result = harness.getOutput();

		assertEquals(1, result.size());

		verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
	} finally {
		harness.close();
	}
}
 
Example 8
Source File: CEPOperatorTest.java    From flink with Apache License 2.0
@Test
public void testKeyedCEPOperatorCheckpointing() throws Exception {

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness = getCepTestHarness(false);

	try {
		harness.open();

		Event startEvent = new Event(42, "start", 1.0);
		SubEvent middleEvent = new SubEvent(42, "foo", 1.0, 10.0);
		Event endEvent = new Event(42, "end", 1.0);

		harness.processElement(new StreamRecord<>(startEvent, 1L));
		harness.processElement(new StreamRecord<>(new Event(42, "foobar", 1.0), 2L));

		// simulate snapshot/restore with some elements in internal sorting queue
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		harness.close();

		harness = getCepTestHarness(false);

		harness.setup();
		harness.initializeState(snapshot);
		harness.open();

		harness.processWatermark(new Watermark(Long.MIN_VALUE));

		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3L));

		// if element timestamps are not correctly checkpointed/restored this will lead to
		// a pruning time underflow exception in NFA
		harness.processWatermark(new Watermark(2L));

		harness.processElement(new StreamRecord<Event>(middleEvent, 3L));
		harness.processElement(new StreamRecord<>(new Event(42, "start", 1.0), 4L));
		harness.processElement(new StreamRecord<>(endEvent, 5L));

		// simulate snapshot/restore with empty element queue but NFA state
		OperatorSubtaskState snapshot2 = harness.snapshot(1L, 1L);
		harness.close();

		harness = getCepTestHarness(false);

		harness.setup();
		harness.initializeState(snapshot2);
		harness.open();

		harness.processWatermark(new Watermark(Long.MAX_VALUE));

		// get and verify the output

		Queue<Object> result = harness.getOutput();

		assertEquals(2, result.size());

		verifyPattern(result.poll(), startEvent, middleEvent, endEvent);
		verifyWatermark(result.poll(), Long.MAX_VALUE);
	} finally {
		harness.close();
	}
}
 
Example 9
Source File: CEPOperatorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCEPOperatorCleanupEventTime() throws Exception {

	Event startEvent1 = new Event(42, "start", 1.0);
	Event startEvent2 = new Event(42, "start", 2.0);
	SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
	SubEvent middleEvent2 = new SubEvent(42, "foo2", 1.0, 10.0);
	SubEvent middleEvent3 = new SubEvent(42, "foo3", 1.0, 10.0);
	Event endEvent1 = new Event(42, "end", 1.0);
	Event endEvent2 = new Event(42, "end", 2.0);

	Event startEventK2 = new Event(43, "start", 1.0);

	CepOperator<Event, Integer, Map<String, List<Event>>> operator = getKeyedCepOperator(false);
	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness = CepOperatorTestUtilities.getCepTestHarness(operator);

	try {
		harness.open();

		harness.processWatermark(new Watermark(Long.MIN_VALUE));

		harness.processElement(new StreamRecord<>(new Event(42, "foobar", 1.0), 2L));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2L));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3L));
		harness.processElement(new StreamRecord<>(startEvent1, 1L));
		harness.processElement(new StreamRecord<>(startEventK2, 1L));

		// there must be 2 keys 42, 43 registered for the watermark callback
		// all the seen elements must be in the priority queues but no NFA yet.

		assertEquals(2L, harness.numEventTimeTimers());
		assertEquals(4L, operator.getPQSize(42));
		assertEquals(1L, operator.getPQSize(43));
		assertTrue(!operator.hasNonEmptySharedBuffer(42));
		assertTrue(!operator.hasNonEmptySharedBuffer(43));

		harness.processWatermark(new Watermark(2L));

		verifyWatermark(harness.getOutput().poll(), Long.MIN_VALUE);
		verifyWatermark(harness.getOutput().poll(), 2L);

		// still the 2 keys
		// one element in PQ for 42 (the barfoo) as it arrived early
		// for 43 the element entered the NFA and the PQ is empty

		assertEquals(2L, harness.numEventTimeTimers());
		assertTrue(operator.hasNonEmptySharedBuffer(42));
		assertEquals(1L, operator.getPQSize(42));
		assertTrue(operator.hasNonEmptySharedBuffer(43));
		assertTrue(!operator.hasNonEmptyPQ(43));

		harness.processElement(new StreamRecord<>(startEvent2, 4L));
		harness.processElement(new StreamRecord<Event>(middleEvent2, 5L));

		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		harness.close();

		CepOperator<Event, Integer, Map<String, List<Event>>> operator2 = getKeyedCepOperator(false);
		harness = CepOperatorTestUtilities.getCepTestHarness(operator2);
		harness.setup();
		harness.initializeState(snapshot);
		harness.open();

		harness.processElement(new StreamRecord<>(endEvent1, 6L));
		harness.processWatermark(11L);
		harness.processWatermark(12L);

		// now we have 1 key because the 43 expired and was removed.
		// 42 is still there due to startEvent2
		assertEquals(1L, harness.numEventTimeTimers());
		assertTrue(operator2.hasNonEmptySharedBuffer(42));
		assertTrue(!operator2.hasNonEmptyPQ(42));
		assertTrue(!operator2.hasNonEmptySharedBuffer(43));
		assertTrue(!operator2.hasNonEmptyPQ(43));

		verifyPattern(harness.getOutput().poll(), startEvent1, middleEvent1, endEvent1);
		verifyPattern(harness.getOutput().poll(), startEvent1, middleEvent2, endEvent1);
		verifyPattern(harness.getOutput().poll(), startEvent2, middleEvent2, endEvent1);
		verifyWatermark(harness.getOutput().poll(), 11L);
		verifyWatermark(harness.getOutput().poll(), 12L);

		// this is a late event, because timestamp(12) = last watermark(12)
		harness.processElement(new StreamRecord<Event>(middleEvent3, 12L));
		harness.processElement(new StreamRecord<>(endEvent2, 13L));
		harness.processWatermark(20L);
		harness.processWatermark(21L);

		assertTrue(!operator2.hasNonEmptySharedBuffer(42));
		assertTrue(!operator2.hasNonEmptyPQ(42));
		assertEquals(0L, harness.numEventTimeTimers());

		assertEquals(3, harness.getOutput().size());
		verifyPattern(harness.getOutput().poll(), startEvent2, middleEvent2, endEvent2);

		verifyWatermark(harness.getOutput().poll(), 20L);
		verifyWatermark(harness.getOutput().poll(), 21L);
	} finally {
		harness.close();
	}
}
 
Example 10
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeStartingNewPatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);
	final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
				getKeyedCepOpearator(false, new NFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processElement(new StreamRecord<Event>(startEvent1, 1));
		harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-starting-new-pattern-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example 11
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeWindowsWithKryoSerializedKeysSnapshot() throws Exception {
	final int windowSize = 3;

	TypeInformation<Tuple2<NonPojoType, Integer>> inputType = new TypeHint<Tuple2<NonPojoType, Integer>>() {}.getTypeInfo();

	ReducingStateDescriptor<Tuple2<NonPojoType, Integer>> stateDesc = new ReducingStateDescriptor<>("window-contents",
		new SumReducer<>(),
		inputType.createSerializer(new ExecutionConfig()));

	TypeSerializer<NonPojoType> keySerializer = TypeInformation.of(NonPojoType.class).createSerializer(new ExecutionConfig());
	assertTrue(keySerializer instanceof KryoSerializer);

	WindowOperator<NonPojoType, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>, TimeWindow> operator = new WindowOperator<>(
		TumblingEventTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
		new TimeWindow.Serializer(),
		new TupleKeySelector<>(),
		keySerializer,
		stateDesc,
		new InternalSingleValueWindowFunction<>(new PassThroughWindowFunction<>()),
		EventTimeTrigger.create(),
		0,
		null /* late data output tag */);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	OneInputStreamOperatorTestHarness<Tuple2<NonPojoType, Integer>, Tuple2<NonPojoType, Integer>> testHarness =
		new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), TypeInformation.of(NonPojoType.class));

	testHarness.setup();
	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3999));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 3000));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 20));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key1"), 1), 999));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1998));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1999));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>(new NonPojoType("key2"), 1), 1000));

	testHarness.processWatermark(new Watermark(999));
	expectedOutput.add(new Watermark(999));
	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	testHarness.processWatermark(new Watermark(1999));
	expectedOutput.add(new Watermark(1999));
	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-kryo-serialized-key-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example 12
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSessionWindowsWithCountTriggerSnapshot() throws Exception {
	final int sessionSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<String>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			PurgingTrigger.of(CountTrigger.of(4)),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 3500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-session-with-stateful-trigger-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example 13
Source File: CEPMigrationTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeAfterBranchingPatternSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent = new Event(42, "start", 1.0);
	final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
	final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
				getKeyedCepOpearator(false, new NFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();

		harness.processElement(new StreamRecord<Event>(startEvent, 1));
		harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
		harness.processElement(new StreamRecord<Event>(middleEvent2, 3));

		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-after-branching-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example 14
Source File: AsyncWaitOperatorTest.java    From flink with Apache License 2.0
/**
 * Tests that the AsyncWaitOperator can restart if the checkpointed queue was full.
 *
 * <p>See FLINK-7949
 */
@Test(timeout = 10000)
public void testRestartWithFullQueue() throws Exception {
	final int capacity = 10;

	// 1. create the snapshot which contains capacity + 1 elements
	final CompletableFuture<Void> trigger = new CompletableFuture<>();

	final OneInputStreamOperatorTestHarness<Integer, Integer> snapshotHarness = createTestHarness(
		new ControllableAsyncFunction<>(trigger), // blocks its results until the trigger future completes
		1000L,
		capacity,
		AsyncDataStream.OutputMode.ORDERED);

	snapshotHarness.open();

	final OperatorSubtaskState snapshot;

	final ArrayList<Integer> expectedOutput = new ArrayList<>(capacity);

	try {
		synchronized (snapshotHarness.getCheckpointLock()) {
			for (int i = 0; i < capacity; i++) {
				snapshotHarness.processElement(i, 0L);
				expectedOutput.add(i);
			}
		}

		synchronized (snapshotHarness.getCheckpointLock()) {
			// execute the snapshot within the checkpoint lock, because then it is guaranteed
			// that the lastElementWriter has written the exceeding element
			snapshot = snapshotHarness.snapshot(0L, 0L);
		}

		// trigger the computation to make the close call finish
		trigger.complete(null);
	} finally {
		synchronized (snapshotHarness.getCheckpointLock()) {
			snapshotHarness.close();
		}
	}

	// 2. restore the snapshot and check that we complete
	final OneInputStreamOperatorTestHarness<Integer, Integer> recoverHarness = createTestHarness(
		new ControllableAsyncFunction<>(CompletableFuture.completedFuture(null)),
		1000L,
		capacity,
		AsyncDataStream.OutputMode.ORDERED);

	recoverHarness.initializeState(snapshot);

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.open();
	}

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.endInput();
		recoverHarness.close();
	}

	final ConcurrentLinkedQueue<Object> output = recoverHarness.getOutput();

	final List<Integer> outputElements = output.stream()
			.map(r -> ((StreamRecord<Integer>) r).getValue())
			.collect(Collectors.toList());

	assertThat(outputElements, Matchers.equalTo(expectedOutput));
}
 
Example 15
Source File: ProcTimeSortOperatorTest.java    From flink with Apache License 2.0
@Test
public void test() throws Exception {
	ProcTimeSortOperator operator = createSortOperator();
	OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(operator);
	testHarness.open();
	testHarness.setProcessingTime(0L);
	testHarness.processElement(insertRecord(3, 3L, "Hello world", 3));
	testHarness.processElement(insertRecord(2, 2L, "Hello", 2));
	testHarness.processElement(insertRecord(6, 2L, "Luke Skywalker", 6));
	testHarness.processElement(insertRecord(5, 3L, "I am fine.", 5));
	testHarness.processElement(insertRecord(7, 1L, "Comment#1", 7));
	testHarness.processElement(insertRecord(9, 4L, "Comment#3", 9));
	testHarness.setProcessingTime(1L);

	List<Object> expectedOutput = new ArrayList<>();
	expectedOutput.add(insertRecord(2, 2L, "Hello", 2));
	expectedOutput.add(insertRecord(3, 3L, "Hello world", 3));
	expectedOutput.add(insertRecord(5, 3L, "I am fine.", 5));
	expectedOutput.add(insertRecord(6, 2L, "Luke Skywalker", 6));
	expectedOutput.add(insertRecord(7, 1L, "Comment#1", 7));
	expectedOutput.add(insertRecord(9, 4L, "Comment#3", 9));
	assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());

	testHarness.processElement(insertRecord(10, 4L, "Comment#4", 10));
	testHarness.processElement(insertRecord(8, 4L, "Comment#2", 8));
	testHarness.processElement(insertRecord(1, 1L, "Hi", 2));
	testHarness.processElement(insertRecord(1, 1L, "Hi", 1));
	testHarness.processElement(insertRecord(4, 3L, "Helloworld, how are you?", 4));
	testHarness.processElement(insertRecord(4, 5L, "Hello, how are you?", 4));

	// take a snapshot; the buffered data can later be recovered from this state
	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0);
	testHarness.close();

	expectedOutput.clear();

	operator = createSortOperator();
	testHarness = createTestHarness(operator);
	testHarness.initializeState(snapshot);
	testHarness.open();
	testHarness.processElement(insertRecord(5, 3L, "I am fine.", 6));
	testHarness.setProcessingTime(1L);

	expectedOutput.add(insertRecord(1, 1L, "Hi", 2));
	expectedOutput.add(insertRecord(1, 1L, "Hi", 1));
	expectedOutput.add(insertRecord(4, 3L, "Helloworld, how are you?", 4));
	expectedOutput.add(insertRecord(4, 5L, "Hello, how are you?", 4));
	expectedOutput.add(insertRecord(5, 3L, "I am fine.", 6));
	expectedOutput.add(insertRecord(8, 4L, "Comment#2", 8));
	expectedOutput.add(insertRecord(10, 4L, "Comment#4", 10));
	assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
}
 
Example 16
Source File: FlinkKafkaProducer011ITCase.java    From Flink-CEPplus with Apache License 2.0
/**
 * This test checks whether FlinkKafkaProducer011 correctly aborts lingering transactions after a failure.
 * If such transactions were left lingering, consumers would be unable to read committed records
 * that were created after the lingering transaction.
 */
@Test
public void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception {
	String topic = "flink-kafka-producer-fail-before-notify";

	OneInputStreamOperatorTestHarness<Integer, Object> testHarness1 = createTestHarness(topic);
	checkProducerLeak();
	testHarness1.setup();
	testHarness1.open();
	testHarness1.processElement(42, 0);
	testHarness1.snapshot(0, 1);
	testHarness1.processElement(43, 2);
	OperatorSubtaskState snapshot1 = testHarness1.snapshot(1, 3);

	testHarness1.processElement(44, 4);
	testHarness1.snapshot(2, 5);
	testHarness1.processElement(45, 6);

	// do not close the previous testHarness, to make sure that closing does not clean something up (in case of failure
	// there might not be any close)
	OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic);
	testHarness2.setup();
	// restore from snapshot1, transactions with records 44 and 45 should be aborted
	testHarness2.initializeState(snapshot1);
	testHarness2.open();

	// write and commit more records, after potentially lingering transactions
	testHarness2.processElement(46, 7);
	testHarness2.snapshot(4, 8);
	testHarness2.processElement(47, 9);
	testHarness2.notifyOfCompletedCheckpoint(4);

	// now we should have:
	// - records 42 and 43 in committed transactions
	// - aborted transactions with records 44 and 45
	// - committed transaction with record 46
	// - pending transaction with record 47
	assertExactlyOnceForTopic(createProperties(), topic, 0, Arrays.asList(42, 43, 46));

	try {
		testHarness1.close();
	} catch (Exception e) {
		// The only acceptable exception is ProducerFencedException because testHarness2 uses the same
		// transactional ID.
		if (!(e.getCause() instanceof ProducerFencedException)) {
			fail("Received unexpected exception " + e);
		}
	}
	testHarness2.close();
	deleteTestTopic(topic);
	checkProducerLeak();
}
 
Example 17
Source File: WindowOperatorTest.java    From flink with Apache License 2.0
private void testSlidingEventTimeWindows(OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator) throws Exception {

		OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
			createTestHarness(operator);

		testHarness.setup();
		testHarness.open();

		ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

		// add elements out-of-order
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3999));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 3000));

		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 20));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 0));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 999));

		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1998));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1999));
		testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 1000));

		testHarness.processWatermark(new Watermark(999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 999));
		expectedOutput.add(new Watermark(999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(1999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 1999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 3), 1999));
		expectedOutput.add(new Watermark(1999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(2999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 2999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 3), 2999));
		expectedOutput.add(new Watermark(2999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		// do a snapshot, close and restore again
		OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);
		testHarness.close();

		expectedOutput.clear();
		testHarness = createTestHarness(operator);
		testHarness.setup();
		testHarness.initializeState(snapshot);
		testHarness.open();

		testHarness.processWatermark(new Watermark(3999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 5), 3999));
		expectedOutput.add(new Watermark(3999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(4999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 2), 4999));
		expectedOutput.add(new Watermark(4999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.processWatermark(new Watermark(5999));
		expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 2), 5999));
		expectedOutput.add(new Watermark(5999));
		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		// those don't have any effect...
		testHarness.processWatermark(new Watermark(6999));
		testHarness.processWatermark(new Watermark(7999));
		expectedOutput.add(new Watermark(6999));
		expectedOutput.add(new Watermark(7999));

		TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator());

		testHarness.close();
	}
 
Example 18
Source File: WrappingFunctionSnapshotRestoreTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testSnapshotAndRestoreWrappedCheckpointedFunction() throws Exception {

	StreamMap<Integer, Integer> operator = new StreamMap<>(
			new WrappingTestFun(new WrappingTestFun(new InnerTestFun())));

	OneInputStreamOperatorTestHarness<Integer, Integer> testHarness =
			new OneInputStreamOperatorTestHarness<>(operator);

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>(5, 12L));

	// snapshot and restore from scratch
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);

	testHarness.close();

	InnerTestFun innerTestFun = new InnerTestFun();
	operator = new StreamMap<>(new WrappingTestFun(new WrappingTestFun(innerTestFun)));

	testHarness = new OneInputStreamOperatorTestHarness<>(operator);

	testHarness.setup();
	testHarness.initializeState(snapshot);
	testHarness.open();

	Assert.assertTrue(innerTestFun.wasRestored);
	testHarness.close();
}