Java Code Examples for org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness#open()

The following examples show how to use org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness#open(). Each example is drawn from an open-source project; the source file and project are listed above the code.
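Before the examples, here is a minimal sketch of the lifecycle most of them follow. It assumes the flink-streaming-java test utilities (the test-jar) are on the classpath; the FromElementsFunction source is only a stand-in for whatever operator is under test.

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.streaming.api.functions.source.FromElementsFunction;
import org.apache.flink.streaming.api.operators.StreamSource;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;

public class OpenLifecycleSketch {

	public static void main(String[] args) throws Exception {
		// Any operator works; a simple in-memory source keeps the sketch self-contained.
		FromElementsFunction<String> source =
			new FromElementsFunction<>(StringSerializer.INSTANCE, "a", "b", "c");
		StreamSource<String, FromElementsFunction<String>> operator = new StreamSource<>(source);

		// Constructor arguments: operator, maxParallelism, parallelism, subtask index.
		AbstractStreamOperatorTestHarness<String> harness =
			new AbstractStreamOperatorTestHarness<>(operator, 1, 1, 0);

		harness.setup(); // builds the mock runtime environment
		harness.open();  // initializes state, then calls open() on the operator and its user function

		// ... exercise the operator here, e.g. harness.snapshot(0L, 0L) ...

		harness.close();
	}
}

When a test restores from an earlier snapshot, initializeState(...) is called between setup() and open(), as the migration tests below demonstrate.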
Example 1
Source File: RMQSinkTest.java    From flink with Apache License 2.0
@Test
public void testOpen() throws Exception {
	MockSerializationSchema<String> serializationSchema = new MockSerializationSchema<>();

	RMQSink<String> producer = new RMQSink<>(rmqConnectionConfig, serializationSchema, publishOptions);
	AbstractStreamOperatorTestHarness<Object> testHarness = new AbstractStreamOperatorTestHarness<>(
		new StreamSink<>(producer), 1, 1, 0
	);

	testHarness.open();
	assertThat("Open method was not called", serializationSchema.isOpenCalled(), is(true));
}
 
Example 2
Source File: FlinkKafkaConsumerBaseTest.java    From flink with Apache License 2.0
@Test
public void testOpen() throws Exception {
	MockDeserializationSchema<Object> deserializationSchema = new MockDeserializationSchema<>();

	AbstractStreamOperatorTestHarness<Object> testHarness = createTestHarness(
		new DummyFlinkKafkaConsumer<>(new KafkaDeserializationSchemaWrapper<>(deserializationSchema)),
		1,
		0
	);

	testHarness.open();
	assertThat("Open method was not called", deserializationSchema.isOpenCalled(), is(true));
}
 
Example 3
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.singletonList("dummy-topic"),
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// assert that the subscribed partitions map is non-null but empty (no partitions were found)
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 4
Source File: ContinuousFileProcessingTest.java    From flink with Apache License 2.0
@Test
public void testFunctionRestore() throws Exception {
	String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

	org.apache.hadoop.fs.Path path = null;
	long fileModTime = Long.MIN_VALUE;
	for (int i = 0; i < 1; i++) {
		Tuple2<org.apache.hadoop.fs.Path, String> file = createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
		path = file.f0;
		fileModTime = hdfs.getFileStatus(file.f0).getModificationTime();
	}

	TextInputFormat format = new TextInputFormat(new Path(testBasePath));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
		new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
	testHarness.open();

	final Throwable[] error = new Throwable[1];

	final OneShotLatch latch = new OneShotLatch();

	final DummySourceContext sourceContext = new DummySourceContext() {
		@Override
		public void collect(TimestampedFileInputSplit element) {
			latch.trigger();
		}
	};

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.run(sourceContext);
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	// first condition for the source to have updated its state: emit at least one element
	if (!latch.isTriggered()) {
		latch.await();
	}

	// second condition for the source to have updated its state: it no longer holds the checkpoint lock,
	// which means it has processed all the splits and updated its state.
	synchronized (sourceContext.getCheckpointLock()) {}

	OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
	monitoringFunction.cancel();
	runner.join();

	testHarness.close();

	final ContinuousFileMonitoringFunction<String> monitoringFunctionCopy =
		createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> srcCopy =
		new StreamSource<>(monitoringFunctionCopy);

	AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarnessCopy =
		new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
	testHarnessCopy.initializeState(snapshot);
	testHarnessCopy.open();

	Assert.assertNull(error[0]);
	Assert.assertEquals(fileModTime, monitoringFunctionCopy.getGlobalModificationTime());

	hdfs.delete(path, false);
}
 
Example 5
Source File: FromElementsFunctionTest.java    From flink with Apache License 2.0
@Test
public void testCheckpointAndRestore() {
	try {
		final int numElements = 10000;

		List<Integer> data = new ArrayList<Integer>(numElements);
		List<Integer> result = new ArrayList<Integer>(numElements);

		for (int i = 0; i < numElements; i++) {
			data.add(i);
		}

		final FromElementsFunction<Integer> source = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> src = new StreamSource<>(source);
		AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
		testHarness.open();

		final SourceFunction.SourceContext<Integer> ctx = new ListSourceContext<Integer>(result, 2L);

		final Throwable[] error = new Throwable[1];

		// run the source asynchronously
		Thread runner = new Thread() {
			@Override
			public void run() {
				try {
					source.run(ctx);
				}
				catch (Throwable t) {
					error[0] = t;
				}
			}
		};
		runner.start();

		// wait for a bit
		Thread.sleep(1000);

		// make a checkpoint
		List<Integer> checkpointData = new ArrayList<>(numElements);
		OperatorSubtaskState handles = null;
		synchronized (ctx.getCheckpointLock()) {
			handles = testHarness.snapshot(566, System.currentTimeMillis());
			checkpointData.addAll(result);
		}

		// cancel the source
		source.cancel();
		runner.join();

		// check for errors
		if (error[0] != null) {
			System.err.println("Error in asynchronous source runner");
			error[0].printStackTrace();
			fail("Error in asynchronous source runner");
		}

		final FromElementsFunction<Integer> sourceCopy = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> srcCopy = new StreamSource<>(sourceCopy);
		AbstractStreamOperatorTestHarness<Integer> testHarnessCopy =
			new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
		testHarnessCopy.setup();
		testHarnessCopy.initializeState(handles);
		testHarnessCopy.open();

		// recovery run
		SourceFunction.SourceContext<Integer> newCtx = new ListSourceContext<>(checkpointData);

		sourceCopy.run(newCtx);

		assertEquals(data, checkpointData);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 6
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestoreWithEmptyState() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	// although the restored state is empty, the fetcher should still have registered the initially discovered shard;
	// furthermore, the discovered shard should be considered a newly created shard while the job wasn't running,
	// and therefore should be consumed from the earliest sequence number
	KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredShardState.getStreamShardHandle().isClosed());
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredShardState.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 7
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// set up the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));

		// set up the new shards
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));

		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);

		newSplitShard1.setParentShardId(TEST_SHARD_ID);

		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));

		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);

		newSplitShard2.setParentShardId(TEST_SHARD_ID);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());

	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 8
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.2-snapshot-empty-state";
	// all new partitions after the snapshot are considered as partitions that were created while the
	// consumer wasn't running, and should start from the earliest offset.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
	}

	// assert that there are partitions and that they are identical to the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// the new partitions should have been considered restored state
	assertTrue(consumerFunction.getRestoredState() != null);
	assertTrue(!consumerFunction.getRestoredState().isEmpty());
	for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
		assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
	}

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 9
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From flink with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {

		final OneShotLatch latch = new OneShotLatch();
		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

		doAnswer(new Answer<Void>() {
			@Override
			public Void answer(InvocationOnMock invocation) throws Throwable {
				latch.trigger();
				return null;
			}
		}).when(fetcher).runFetchLoop();

		when(fetcher.snapshotCurrentState()).thenReturn(state);

		final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

		final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

		StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
				new StreamSource<>(consumerFunction);

		final AbstractStreamOperatorTestHarness<String> testHarness =
				new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

		testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

		testHarness.setup();
		testHarness.open();

		final Throwable[] error = new Throwable[1];

		// run the source asynchronously
		Thread runner = new Thread() {
			@Override
			public void run() {
				try {
					consumerFunction.run(new DummySourceContext() {
						@Override
						public void collect(String element) {

						}
					});
				}
				catch (Throwable t) {
					t.printStackTrace();
					error[0] = t;
				}
			}
		};
		runner.start();

		if (!latch.isTriggered()) {
			latch.await();
		}

		final OperatorSubtaskState snapshot;
		synchronized (testHarness.getCheckpointLock()) {
			snapshot = testHarness.snapshot(0L, 0L);
		}

		OperatorSnapshotUtil.writeStateHandle(snapshot, path);

		consumerOperator.close();
		runner.join();
	}
 
Example 10
Source File: FromElementsFunctionTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCheckpointAndRestore() {
	try {
		final int numElements = 10000;

		List<Integer> data = new ArrayList<Integer>(numElements);
		List<Integer> result = new ArrayList<Integer>(numElements);

		for (int i = 0; i < numElements; i++) {
			data.add(i);
		}

		final FromElementsFunction<Integer> source = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> src = new StreamSource<>(source);
		AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
		testHarness.open();

		final SourceFunction.SourceContext<Integer> ctx = new ListSourceContext<Integer>(result, 2L);

		final Throwable[] error = new Throwable[1];

		// run the source asynchronously
		Thread runner = new Thread() {
			@Override
			public void run() {
				try {
					source.run(ctx);
				}
				catch (Throwable t) {
					error[0] = t;
				}
			}
		};
		runner.start();

		// wait for a bit
		Thread.sleep(1000);

		// make a checkpoint
		List<Integer> checkpointData = new ArrayList<>(numElements);
		OperatorSubtaskState handles = null;
		synchronized (ctx.getCheckpointLock()) {
			handles = testHarness.snapshot(566, System.currentTimeMillis());
			checkpointData.addAll(result);
		}

		// cancel the source
		source.cancel();
		runner.join();

		// check for errors
		if (error[0] != null) {
			System.err.println("Error in asynchronous source runner");
			error[0].printStackTrace();
			fail("Error in asynchronous source runner");
		}

		final FromElementsFunction<Integer> sourceCopy = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> srcCopy = new StreamSource<>(sourceCopy);
		AbstractStreamOperatorTestHarness<Integer> testHarnessCopy =
			new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
		testHarnessCopy.setup();
		testHarnessCopy.initializeState(handles);
		testHarnessCopy.open();

		// recovery run
		SourceFunction.SourceContext<Integer> newCtx = new ListSourceContext<>(checkpointData);

		sourceCopy.run(newCtx);

		assertEquals(data, checkpointData);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 11
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From Flink-CEPplus with Apache License 2.0
private void writeSnapshot(String path, HashMap<KafkaTopicPartition, Long> state) throws Exception {

		final OneShotLatch latch = new OneShotLatch();
		final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);

		doAnswer(new Answer<Void>() {
			@Override
			public Void answer(InvocationOnMock invocation) throws Throwable {
				latch.trigger();
				return null;
			}
		}).when(fetcher).runFetchLoop();

		when(fetcher.snapshotCurrentState()).thenReturn(state);

		final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

		final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(fetcher, TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

		StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
				new StreamSource<>(consumerFunction);

		final AbstractStreamOperatorTestHarness<String> testHarness =
				new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

		testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

		testHarness.setup();
		testHarness.open();

		final Throwable[] error = new Throwable[1];

		// run the source asynchronously
		Thread runner = new Thread() {
			@Override
			public void run() {
				try {
					consumerFunction.run(new DummySourceContext() {
						@Override
						public void collect(String element) {

						}
					});
				}
				catch (Throwable t) {
					t.printStackTrace();
					error[0] = t;
				}
			}
		};
		runner.start();

		if (!latch.isTriggered()) {
			latch.await();
		}

		final OperatorSubtaskState snapshot;
		synchronized (testHarness.getCheckpointLock()) {
			snapshot = testHarness.snapshot(0L, 0L);
		}

		OperatorSnapshotUtil.writeStateHandle(snapshot, path);

		consumerOperator.close();
		runner.join();
	}
 
Example 12
Source File: FlinkKinesisConsumerMigrationTest.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void writeSnapshot(String path, HashMap<StreamShardMetadata, SequenceNumber> state) throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(state.size());
	for (StreamShardMetadata shardMetadata : state.keySet()) {
		Shard shard = new Shard();
		shard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
		sequenceNumberRange.withStartingSequenceNumber("1");
		shard.setSequenceNumberRange(sequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), shard));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		state,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumer);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	testHarness.open();

	final AtomicReference<Throwable> error = new AtomicReference<>();

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				consumer.run(new TestSourceContext<>());
			} catch (Throwable t) {
				t.printStackTrace();
				error.set(t);
			}
		}
	};
	runner.start();

	fetcher.waitUntilRun();

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(snapshot, path);

	consumerOperator.close();
	runner.join();
}
 
Example 13
Source File: FlinkKinesisConsumerMigrationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRestoreWithReshardedStream() throws Exception {
	final List<StreamShardHandle> initialDiscoveryShards = new ArrayList<>(TEST_STATE.size());
	for (StreamShardMetadata shardMetadata : TEST_STATE.keySet()) {
		// set up the closed shard
		Shard closedShard = new Shard();
		closedShard.setShardId(shardMetadata.getShardId());

		SequenceNumberRange closedSequenceNumberRange = new SequenceNumberRange();
		closedSequenceNumberRange.withStartingSequenceNumber("1");
		closedSequenceNumberRange.withEndingSequenceNumber("1087654321"); // this represents a closed shard
		closedShard.setSequenceNumberRange(closedSequenceNumberRange);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), closedShard));

		// set up the new shards
		Shard newSplitShard1 = new Shard();
		newSplitShard1.setShardId(KinesisShardIdGenerator.generateFromShardOrder(1));

		SequenceNumberRange newSequenceNumberRange1 = new SequenceNumberRange();
		newSequenceNumberRange1.withStartingSequenceNumber("1087654322");
		newSplitShard1.setSequenceNumberRange(newSequenceNumberRange1);

		newSplitShard1.setParentShardId(TEST_SHARD_ID);

		Shard newSplitShard2 = new Shard();
		newSplitShard2.setShardId(KinesisShardIdGenerator.generateFromShardOrder(2));

		SequenceNumberRange newSequenceNumberRange2 = new SequenceNumberRange();
		newSequenceNumberRange2.withStartingSequenceNumber("2087654322");
		newSplitShard2.setSequenceNumberRange(newSequenceNumberRange2);

		newSplitShard2.setParentShardId(TEST_SHARD_ID);

		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard1));
		initialDiscoveryShards.add(new StreamShardHandle(shardMetadata.getStreamName(), newSplitShard2));
	}

	final TestFetcher<String> fetcher = new TestFetcher<>(
		Collections.singletonList(TEST_STREAM_NAME),
		new TestSourceContext<>(),
		new TestRuntimeContext(true, 1, 0),
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		null,
		initialDiscoveryShards);

	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(
		fetcher, new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
	testHarness.open();

	consumerFunction.run(new TestSourceContext<>());

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState()));

	// assert that the fetcher is registered with all shards, including new shards
	assertEquals(3, fetcher.getSubscribedShardsState().size());

	KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0);
	assertEquals(TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName());
	assertEquals(TEST_SHARD_ID, restoredClosedShardState.getStreamShardHandle().getShard().getShardId());
	assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed());
	assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(1), restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard1.getLastProcessedSequenceNum());

	KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2);
	assertEquals(TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName());
	assertEquals(KinesisShardIdGenerator.generateFromShardOrder(2), restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId());
	assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed());
	// new shards should be consumed from the beginning
	assertEquals(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), restoredNewSplitShard2.getLastProcessedSequenceNum());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 14
Source File: RMQSourceTest.java    From flink with Apache License 2.0
@Test
public void testCheckpointing() throws Exception {
	source.autoAck = false;

	StreamSource<String, RMQSource<String>> src = new StreamSource<>(source);
	AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
	testHarness.open();

	sourceThread.start();

	Thread.sleep(5);

	final Random random = new Random(System.currentTimeMillis());
	int numSnapshots = 50;
	long previousSnapshotId;
	long lastSnapshotId = 0;

	long totalNumberOfAcks = 0;

	for (int i = 0; i < numSnapshots; i++) {
		long snapshotId = random.nextLong();
		OperatorSubtaskState data;

		synchronized (DummySourceContext.lock) {
			data = testHarness.snapshot(snapshotId, System.currentTimeMillis());
			previousSnapshotId = lastSnapshotId;
			lastSnapshotId = messageId;
		}
		// let some time pass
		Thread.sleep(5);

		// check if the correct number of messages have been snapshotted
		final long numIds = lastSnapshotId - previousSnapshotId;

		RMQTestSource sourceCopy = new RMQTestSource();
		StreamSource<String, RMQTestSource> srcCopy = new StreamSource<>(sourceCopy);
		AbstractStreamOperatorTestHarness<String> testHarnessCopy =
			new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);

		testHarnessCopy.setup();
		testHarnessCopy.initializeState(data);
		testHarnessCopy.open();

		ArrayDeque<Tuple2<Long, Set<String>>> deque = sourceCopy.getRestoredState();
		Set<String> messageIds = deque.getLast().f1;

		assertEquals(numIds, messageIds.size());
		if (messageIds.size() > 0) {
			assertTrue(messageIds.contains(Long.toString(lastSnapshotId)));
		}

		// check if the messages are being acknowledged and the transaction committed
		synchronized (DummySourceContext.lock) {
			source.notifyCheckpointComplete(snapshotId);
		}
		totalNumberOfAcks += numIds;

	}

	Mockito.verify(source.channel, Mockito.times((int) totalNumberOfAcks)).basicAck(Mockito.anyLong(), Mockito.eq(false));
	Mockito.verify(source.channel, Mockito.times(numSnapshots)).txCommit();

}
 
Example 15
Source File: FlinkKafkaConsumerBaseMigrationTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.2-snapshot-empty-state";
	// all new partitions after the snapshot are considered as partitions that were created while the
	// consumer wasn't running, and should start from the earliest offset.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
	}

	// assert that there are partitions and that they are identical to the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// the new partitions should have been considered restored state
	assertTrue(consumerFunction.getRestoredState() != null);
	assertTrue(!consumerFunction.getRestoredState().isEmpty());
	for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
		assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
	}

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Example 16
Source File: ContinuousFileProcessingMigrationTest.java    From flink with Apache License 2.0
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {

	File testFolder = tempFolder.newFolder();

	long fileModTime = Long.MIN_VALUE;
	for (int i = 0; i < 1; i++) {
		Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
		fileModTime = file.f0.lastModified();
	}

	TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

	testHarness.open();

	final Throwable[] error = new Throwable[1];

	final OneShotLatch latch = new OneShotLatch();

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.run(new DummySourceContext() {
					@Override
					public void collect(TimestampedFileInputSplit element) {
						latch.trigger();
					}

					@Override
					public void markAsTemporarilyIdle() {

					}
				});
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	if (!latch.isTriggered()) {
		latch.await();
	}

	final OperatorSubtaskState snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(
			snapshot,
			"src/test/resources/monitoring-function-migration-test-" + fileModTime + "-flink" + flinkGenerateSavepointVersion + "-snapshot");

	monitoringFunction.cancel();
	runner.join();

	testHarness.close();
}
 
Example 17
Source File: ContinuousFileProcessingMigrationTest.java    From flink with Apache License 2.0
@Test
public void testMonitoringSourceRestore() throws Exception {

	File testFolder = tempFolder.newFolder();

	TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
		new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

	testHarness.setup();

	testHarness.initializeState(
		OperatorSnapshotUtil.getResourceFilename(
			"monitoring-function-migration-test-" + expectedModTime + "-flink" + testMigrateVersion + "-snapshot"));

	testHarness.open();

	Assert.assertEquals((long) expectedModTime, monitoringFunction.getGlobalModificationTime());

}
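 
A closing note: the migration tests above always pair a write side with a restore side. OperatorSnapshotUtil.writeStateHandle(...) persists an OperatorSubtaskState to a file, and initializeState(...) reads it back before open(). The following sketch condenses that round trip into one runnable unit; the /tmp path is a hypothetical stand-in for the src/test/resources files used above, and the simple source stands in for the real consumer functions.

import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
import org.apache.flink.streaming.api.functions.source.FromElementsFunction;
import org.apache.flink.streaming.api.operators.StreamSource;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
import org.apache.flink.streaming.util.OperatorSnapshotUtil;

public class SnapshotRoundTripSketch {

	public static void main(String[] args) throws Exception {
		// Write side: open a source operator and persist its checkpointed state.
		FromElementsFunction<Integer> source =
			new FromElementsFunction<>(IntSerializer.INSTANCE, 1, 2, 3);
		AbstractStreamOperatorTestHarness<Integer> writeHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamSource<>(source), 1, 1, 0);
		writeHarness.setup();
		writeHarness.open();

		final OperatorSubtaskState snapshot;
		synchronized (writeHarness.getCheckpointLock()) {
			snapshot = writeHarness.snapshot(0L, 0L);
		}
		OperatorSnapshotUtil.writeStateHandle(snapshot, "/tmp/round-trip-snapshot"); // hypothetical path
		writeHarness.close();

		// Restore side: a fresh harness reads the file back before open().
		FromElementsFunction<Integer> restoredSource =
			new FromElementsFunction<>(IntSerializer.INSTANCE, 1, 2, 3);
		AbstractStreamOperatorTestHarness<Integer> restoreHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamSource<>(restoredSource), 1, 1, 0);
		restoreHarness.setup();
		restoreHarness.initializeState("/tmp/round-trip-snapshot");
		restoreHarness.open();
		restoreHarness.close();
	}
}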