org.apache.flink.streaming.util.OperatorSnapshotUtil Java Examples

The following examples show how to use org.apache.flink.streaming.util.OperatorSnapshotUtil. They are taken from open-source projects; the source file and project each example comes from are noted above it.
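Most of these tests share one round trip: an operator test harness takes a snapshot, OperatorSnapshotUtil.writeStateHandle(snapshot, path) persists it as a binary file, and a later test (often on a newer Flink version) resolves that file with OperatorSnapshotUtil.getResourceFilename(name) and restores it through the harness's initializeState before open(). The minimal sketch below shows both phases in a single method purely for illustration; in the real migration tests the write phase is an @Ignore'd test run manually on the old version, since getResourceFilename resolves from the test classpath. The trivial StreamMap operator and the snapshot file name are illustrative placeholders, not taken from any Flink test.

@Ignore
@Test
public void writeAndRestoreRoundTripSketch() throws Exception {
	// phase 1: drive a simple operator and persist its snapshot.
	// StreamMap wraps a plain MapFunction; the upper-casing mapper is a made-up stand-in.
	OneInputStreamOperatorTestHarness<String, String> writeHarness =
		new OneInputStreamOperatorTestHarness<>(
			new StreamMap<>((MapFunction<String, String>) value -> value.toUpperCase()));
	writeHarness.setup();
	writeHarness.open();
	writeHarness.processElement(new StreamRecord<>("hello", 0L));

	// do snapshot and save to file, as in the write* tests below
	OperatorSubtaskState snapshot = writeHarness.snapshot(0L, 0L);
	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/my-roundtrip-snapshot");
	writeHarness.close();

	// phase 2: restore a fresh operator instance from the binary snapshot file,
	// as in the testRestore* tests below (initializeState must precede open()).
	OneInputStreamOperatorTestHarness<String, String> restoreHarness =
		new OneInputStreamOperatorTestHarness<>(
			new StreamMap<>((MapFunction<String, String>) value -> value.toUpperCase()));
	restoreHarness.setup();
	restoreHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename("my-roundtrip-snapshot"));
	restoreHarness.open();
	restoreHarness.close();
}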
Example #1
Source File: BucketingSinkMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestore() throws Exception {
	final File outDir = tempFolder.newFolder();

	ValidatingBucketingSink<String> sink = (ValidatingBucketingSink<String>)
			new ValidatingBucketingSink<String>(outDir.getAbsolutePath(), expectedBucketFilesPrefix)
		.setWriter(new StringWriter<String>())
		.setBatchSize(5)
		.setPartPrefix(PART_PREFIX)
		.setInProgressPrefix("")
		.setPendingPrefix("")
		.setValidLengthPrefix("")
		.setInProgressSuffix(IN_PROGRESS_SUFFIX)
		.setPendingSuffix(PENDING_SUFFIX)
		.setValidLengthSuffix(VALID_LENGTH_SUFFIX)
		.setUseTruncate(false); // don't use truncate because files do not exist

	OneInputStreamOperatorTestHarness<String, Object> testHarness = new OneInputStreamOperatorTestHarness<>(
		new StreamSink<>(sink), 10, 1, 0);
	testHarness.setup();

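	// restore state from the binary snapshot file written by the (ignored) writeSnapshot test below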
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"bucketing-sink-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	assertTrue(sink.initializeCalled);

	testHarness.processElement(new StreamRecord<>("test1", 0L));
	testHarness.processElement(new StreamRecord<>("test2", 0L));

	checkLocalFs(outDir, 1, 1, 0, 0);

	testHarness.close();
}
 
Example #2
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSessionWindowsWithCountTriggerInMintConditionSnapshot() throws Exception {

	final int sessionSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<String>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			PurgingTrigger.of(CountTrigger.of(4)),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-session-with-stateful-trigger-mint-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example #3
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeAndOrSubtypConditionsPatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
		new KeyedOneInputStreamOperatorTestHarness<>(
			getKeyedCepOpearator(false, new NFAComplexConditionsFactory()),
			keySelector,
			BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processElement(new StreamRecord<>(startEvent1, 5));
		harness.processWatermark(new Watermark(6));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-conditions-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example #4
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for the topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that no partitions were found and the subscribed-partitions map is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example #5
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSinglePatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
					getKeyedCepOpearator(false, new SinglePatternNFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-single-pattern-afterwards-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example #6
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from a non-empty state taken with a previous Flink version, when some partitions could be
 * found for the topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	// assert that partitions were found and are identical to the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from legacy checkpoint
	assertTrue(consumerFunction.getRestoredState() != null);
	assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example #7
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
	assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, 1000L); // discovery enabled

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file; should fail since discovery is enabled
	try {
		testHarness.initializeState(
			OperatorSnapshotUtil.getResourceFilename(
				"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

		fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
	} catch (Exception e) {
		Assert.assertTrue(e instanceof IllegalArgumentException);
	}
}
 
Example #8
Source File: KafkaMigrationTestBase.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {
	try {
		checkState(flinkGenerateSavepointVersion.isPresent());
		startClusters();

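		// build the operator state to persist via the test-specific initialization hook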
		OperatorSubtaskState snapshot = initializeTestState();
		OperatorSnapshotUtil.writeStateHandle(snapshot, getOperatorSnapshotPath(flinkGenerateSavepointVersion.get()));
	}
	finally {
		shutdownClusters();
	}
}
 
Example #9
Source File: BucketingSinkMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {

	final File outDir = tempFolder.newFolder();

	BucketingSink<String> sink = new BucketingSink<String>(outDir.getAbsolutePath())
		.setWriter(new StringWriter<String>())
		.setBatchSize(5)
		.setPartPrefix(PART_PREFIX)
		.setInProgressPrefix("")
		.setPendingPrefix("")
		.setValidLengthPrefix("")
		.setInProgressSuffix(IN_PROGRESS_SUFFIX)
		.setPendingSuffix(PENDING_SUFFIX)
		.setValidLengthSuffix(VALID_LENGTH_SUFFIX);

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>("test1", 0L));
	testHarness.processElement(new StreamRecord<>("test2", 0L));

	checkLocalFs(outDir, 1, 1, 0, 0);

	testHarness.processElement(new StreamRecord<>("test3", 0L));
	testHarness.processElement(new StreamRecord<>("test4", 0L));
	testHarness.processElement(new StreamRecord<>("test5", 0L));

	checkLocalFs(outDir, 1, 4, 0, 0);

	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/bucketing-sink-migration-test-flink" + flinkGenerateSavepointVersion + "-snapshot");
	testHarness.close();
}
 
Example #10
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {

	final OneShotLatch latch = new OneShotLatch();
	final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

	doAnswer(new Answer<Void>() {
		@Override
		public Void answer(InvocationOnMock invocation) throws Throwable {
			latch.trigger();
			return null;
		}
	}).when(fetcher).runFetchLoop();

	when(fetcher.snapshotCurrentState()).thenReturn(state);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	testHarness.open();

	final Throwable[] error = new Throwable[1];

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				consumerFunction.run(new DummySourceContext() {
					@Override
					public void collect(String element) {

					}
				});
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	if (!latch.isTriggered()) {
		latch.await();
	}

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(snapshot, path);

	consumerOperator.close();
	runner.join();
}
 
Example #11
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeStartingNewPatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);
	final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
				getKeyedCepOpearator(false, new NFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processElement(new StreamRecord<Event>(startEvent1, 1));
		harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-starting-new-pattern-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Example #12
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeApplyProcessingTimeWindowsSnapshot() throws Exception {
	final int windowSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple2<String, Integer>, TimeWindow> operator = new WindowOperator<>(
			TumblingProcessingTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new RichSumReducer<TimeWindow>()),
			ProcessingTimeTrigger.create(),
			0,
			null /* late data output tag */);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	testHarness.setProcessingTime(10);
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1)));

	testHarness.setProcessingTime(3010);
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1)));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key3", 1)));

	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 1), 2999));
	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 1), 2999));

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-apply-processing-time-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example #13
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestoreApplyEventTimeWindows() throws Exception {
	final int windowSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple2<String, Integer>, TimeWindow> operator = new WindowOperator<>(
			TumblingEventTimeWindows.of(Time.of(windowSize, TimeUnit.SECONDS)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new RichSumReducer<TimeWindow>()),
			EventTimeTrigger.create(),
			0,
			null /* late data output tag */);

	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple2<String, Integer>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();

	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"win-op-migration-test-apply-event-time-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

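	// advance event time to fire the first window restored from the snapshot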
	testHarness.processWatermark(new Watermark(2999));
	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key1", 3), 2999));
	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 3), 2999));
	expectedOutput.add(new Watermark(2999));

	testHarness.processWatermark(new Watermark(3999));
	expectedOutput.add(new Watermark(3999));

	testHarness.processWatermark(new Watermark(4999));
	expectedOutput.add(new Watermark(4999));

	testHarness.processWatermark(new Watermark(5999));
	expectedOutput.add(new StreamRecord<>(new Tuple2<>("key2", 2), 5999));
	expectedOutput.add(new Watermark(5999));

	TestHarnessUtil.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput(), new Tuple2ResultSortComparator<>());
	testHarness.close();
}
 
Example #14
Source File: WindowOperatorMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSessionWindowsWithCountTriggerSnapshot() throws Exception {
	final int sessionSize = 3;

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			STRING_INT_TUPLE.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector<String>(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			PurgingTrigger.of(CountTrigger.of(4)),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector<>(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	// add elements out-of-order
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 1), 0));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 2), 1000));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 3), 2500));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key2", 4), 3500));

	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 1), 10));
	testHarness.processElement(new StreamRecord<>(new Tuple2<>("key1", 2), 1000));

	// do snapshot and save to file
	OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0L);

	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-session-with-stateful-trigger-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Example #15
Source File: ContinuousFileProcessingMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {

	File testFolder = tempFolder.newFolder();

	long fileModTime = Long.MIN_VALUE;
	for (int i = 0; i < 1; i++) {
		Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
		fileModTime = file.f0.lastModified();
	}

	TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

	testHarness.open();

	final Throwable[] error = new Throwable[1];

	final OneShotLatch latch = new OneShotLatch();

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.run(new DummySourceContext() {
					@Override
					public void collect(TimestampedFileInputSplit element) {
						latch.trigger();
					}

					@Override
					public void markAsTemporarilyIdle() {

					}
				});
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	if (!latch.isTriggered()) {
		latch.await();
	}

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(
			snapshot,
			"src/test/resources/monitoring-function-migration-test-" + fileModTime + "-flink" + flinkGenerateSavepointVersion + "-snapshot");

	monitoringFunction.cancel();
	runner.join();

	testHarness.close();
}
 
Example #16
Source File: ContinuousFileProcessingMigrationTest.java    From flink with Apache License 2.0
@Test
public void testReaderRestore() throws Exception {
	File testFolder = tempFolder.newFolder();

	final OneShotLatch latch = new OneShotLatch();

	BlockingFileInputFormat format = new BlockingFileInputFormat(latch, new Path(testFolder.getAbsolutePath()));
	TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);

	ContinuousFileReaderOperator<FileInputSplit> initReader = new ContinuousFileReaderOperator<>(format);
	initReader.setOutputType(typeInfo, new ExecutionConfig());

	OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> testHarness =
		new OneInputStreamOperatorTestHarness<>(initReader);
	testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

	testHarness.setup();

	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"reader-migration-test-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	latch.trigger();

	// ... and wait for the operators to close gracefully

	synchronized (testHarness.getCheckpointLock()) {
		testHarness.close();
	}

	TimestampedFileInputSplit split1 =
			new TimestampedFileInputSplit(0, 3, new Path("test/test1"), 0, 100, null);

	TimestampedFileInputSplit split2 =
			new TimestampedFileInputSplit(10, 2, new Path("test/test2"), 101, 200, null);

	TimestampedFileInputSplit split3 =
			new TimestampedFileInputSplit(10, 1, new Path("test/test2"), 0, 100, null);

	TimestampedFileInputSplit split4 =
			new TimestampedFileInputSplit(11, 0, new Path("test/test3"), 0, 100, null);

	// verify that the output contains exactly the four expected splits

	Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split1)));
	Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split2)));
	Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split3)));
	Assert.assertTrue(testHarness.getOutput().contains(new StreamRecord<>(split4)));
}
 
Example #17
Source File: ContinuousFileProcessingMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeReaderSnapshot() throws Exception {

	File testFolder = tempFolder.newFolder();

	TimestampedFileInputSplit split1 =
			new TimestampedFileInputSplit(0, 3, new Path("test/test1"), 0, 100, null);

	TimestampedFileInputSplit split2 =
			new TimestampedFileInputSplit(10, 2, new Path("test/test2"), 101, 200, null);

	TimestampedFileInputSplit split3 =
			new TimestampedFileInputSplit(10, 1, new Path("test/test2"), 0, 100, null);

	TimestampedFileInputSplit split4 =
			new TimestampedFileInputSplit(11, 0, new Path("test/test3"), 0, 100, null);

	// this always blocks to ensure that the reader doesn't do any actual processing so that
	// we keep the state for the four splits
	final OneShotLatch blockingLatch = new OneShotLatch();
	BlockingFileInputFormat format = new BlockingFileInputFormat(blockingLatch, new Path(testFolder.getAbsolutePath()));

	TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);
	ContinuousFileReaderOperator<FileInputSplit> initReader = new ContinuousFileReaderOperator<>(
			format);
	initReader.setOutputType(typeInfo, new ExecutionConfig());
	OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> testHarness =
			new OneInputStreamOperatorTestHarness<>(initReader);
	testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);
	testHarness.open();
	// create some state in the reader
	testHarness.processElement(new StreamRecord<>(split1));
	testHarness.processElement(new StreamRecord<>(split2));
	testHarness.processElement(new StreamRecord<>(split3));
	testHarness.processElement(new StreamRecord<>(split4));
	// take a snapshot of the operator's state. This will be used
	// to initialize another reader and compare the results of the
	// two operators.

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/reader-migration-test-flink" + flinkGenerateSavepointVersion + "-snapshot");
}
 
Example #18
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
@Test
public void testAndOrSubtypeConditionsAfterMigration() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
		new KeyedOneInputStreamOperatorTestHarness<>(
			getKeyedCepOpearator(false, new NFAComplexConditionsFactory()),
			keySelector,
			BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();

		harness.initializeState(
			OperatorSnapshotUtil.getResourceFilename(
				"cep-migration-conditions-flink" + migrateVersion + "-snapshot"));

		harness.open();

		final Event endEvent = new SubEvent(42, "end", 1.0, 2.0);
		harness.processElement(new StreamRecord<>(endEvent, 9));
		harness.processWatermark(new Watermark(20));

		ConcurrentLinkedQueue<Object> result = harness.getOutput();

		// watermark and the result
		assertEquals(2, result.size());

		Object resultObject = result.poll();
		assertTrue(resultObject instanceof StreamRecord);
		StreamRecord<?> resultRecord = (StreamRecord<?>) resultObject;
		assertTrue(resultRecord.getValue() instanceof Map);

		@SuppressWarnings("unchecked")
		Map<String, List<Event>> patternMap =
			(Map<String, List<Event>>) resultRecord.getValue();

		assertEquals(startEvent1, patternMap.get("start").get(0));
		assertEquals(endEvent, patternMap.get("start").get(1));
	} finally {
		harness.close();
	}
}
 
Example #19
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
@Test
public void testSinglePatternAfterMigration() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
					getKeyedCepOpearator(false, new SinglePatternNFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();

		harness.initializeState(
			OperatorSnapshotUtil.getResourceFilename(
				"cep-migration-single-pattern-afterwards-flink" + migrateVersion + "-snapshot"));

		harness.open();

		harness.processElement(new StreamRecord<>(startEvent1, 5));

		harness.processWatermark(new Watermark(20));

		ConcurrentLinkedQueue<Object> result = harness.getOutput();

		// watermark and the result
		assertEquals(2, result.size());

		Object resultObject = result.poll();
		assertTrue(resultObject instanceof StreamRecord);
		StreamRecord<?> resultRecord = (StreamRecord<?>) resultObject;
		assertTrue(resultRecord.getValue() instanceof Map);

		@SuppressWarnings("unchecked")
		Map<String, List<Event>> patternMap =
			(Map<String, List<Event>>) resultRecord.getValue();

		assertEquals(startEvent1, patternMap.get("start").get(0));
	} finally {
		harness.close();
	}
}
 
Example #20
Source File: CEPMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeAfterBranchingPatternSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent = new Event(42, "start", 1.0);
	final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
	final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
				getKeyedCepOpearator(false, new NFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();

		harness.processElement(new StreamRecord<Event>(startEvent, 1));
		harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
		harness
			.processElement(new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
		harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
		harness.processElement(new StreamRecord<Event>(middleEvent2, 3));

		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-after-branching-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}