org.apache.flink.streaming.api.operators.StreamSource Java Examples
The following examples show how to use
org.apache.flink.streaming.api.operators.StreamSource.
Each example notes the open-source project, source file, and license it was taken from.
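Before the examples, here is a minimal sketch of the pattern most of them share: a user-defined SourceFunction is wrapped in a StreamSource operator, which is then driven by Flink's operator test harness. This sketch is illustrative and not taken from any of the projects below; OneElementSource is a made-up source, and AbstractStreamOperatorTestHarness is assumed to be available from Flink's streaming test utilities (the flink-streaming-java test artifact).

import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamSource;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;

public class StreamSourceSketch {

    // Hypothetical source that emits a single element and finishes.
    private static class OneElementSource implements SourceFunction<String> {

        @Override
        public void run(SourceContext<String> ctx) {
            ctx.collect("hello");
        }

        @Override
        public void cancel() {
        }
    }

    public static void main(String[] args) throws Exception {
        // Wrap the user function in the StreamSource operator.
        StreamSource<String, OneElementSource> operator =
            new StreamSource<>(new OneElementSource());

        // Drive the operator with the test harness used throughout the examples
        // below (max parallelism 1, parallelism 1, subtask index 0).
        AbstractStreamOperatorTestHarness<String> harness =
            new AbstractStreamOperatorTestHarness<>(operator, 1, 1, 0);
        harness.open();
        harness.close();
    }
}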
Example #1
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {
    StreamSource<String, ?> sourceOperator = new StreamSource<>(new InfiniteSource<>());
    StreamTaskTestHarness<String> testHarness = setupSourceStreamTask(
        sourceOperator, BasicTypeInfo.STRING_TYPE_INFO, true);

    testHarness.invoke();
    try {
        testHarness.waitForTaskCompletion();
        fail("should throw an exception");
    } catch (Throwable t) {
        if (!ExceptionUtils.findThrowable(t, CancelTaskException.class).isPresent()) {
            throw t;
        }
    }

    assertTrue(testHarness.getOutput().isEmpty());
}
Example #2
Source File: StreamSourceOperatorLatencyMetricsTest.java From Flink-CEPplus with Apache License 2.0
private static <T> void setupSourceOperator(
        StreamSource<T, ?> operator,
        ExecutionConfig executionConfig,
        Environment env,
        ProcessingTimeService timeProvider) {

    StreamConfig cfg = new StreamConfig(new Configuration());
    cfg.setStateBackend(new MemoryStateBackend());

    cfg.setTimeCharacteristic(TimeCharacteristic.EventTime);
    cfg.setOperatorID(new OperatorID());

    try {
        MockStreamTask mockTask = new MockStreamTaskBuilder(env)
            .setConfig(cfg)
            .setExecutionConfig(executionConfig)
            .setProcessingTimeService(timeProvider)
            .build();

        operator.setup(mockTask, cfg, (Output<StreamRecord<T>>) mock(Output.class));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #3
Source File: StreamSourceOperatorWatermarksTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {
    final List<StreamElement> output = new ArrayList<>();

    // regular stream source operator
    final StreamSource<String, InfiniteSource<String>> operator =
        new StreamSource<>(new InfiniteSource<String>());

    setupSourceOperator(operator, TimeCharacteristic.EventTime, 0);
    operator.cancel();

    // run and exit
    operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<String>(output));

    assertTrue(output.isEmpty());
}
Example #4
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@Test
public void testNoMaxWatermarkOnImmediateCancel() throws Exception {
    final List<StreamElement> output = new ArrayList<>();

    // regular stream source operator
    final StreamSource<String, InfiniteSource<String>> operator =
        new StreamSource<>(new InfiniteSource<String>());

    setupSourceOperator(operator, TimeCharacteristic.EventTime, 0);
    operator.cancel();

    // run and exit
    OperatorChain<?, ?> operatorChain = createOperatorChain(operator);
    try {
        operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<String>(output), operatorChain);
    } finally {
        operatorChain.releaseOutputs();
    }

    assertTrue(output.isEmpty());
}
Example #5
Source File: BatchExecutorTest.java From flink with Apache License 2.0
public BatchExecutorTest() {
    batchExecutor = new BatchExecutor(LocalStreamEnvironment.getExecutionEnvironment());

    final Transformation testTransform = new LegacySourceTransformation<>(
        "MockTransform",
        new StreamSource<>(new SourceFunction<String>() {
            @Override
            public void run(SourceContext<String> ctx) {
            }

            @Override
            public void cancel() {
            }
        }),
        BasicTypeInfo.STRING_TYPE_INFO,
        1);
    Pipeline pipeline = batchExecutor.createPipeline(
        Collections.singletonList(testTransform), new TableConfig(), "Test Job");
    streamGraph = (StreamGraph) pipeline;
}
Example #6
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@Test
public void testEmitMaxWatermarkForFiniteSource() throws Exception {
    // regular stream source operator
    StreamSource<String, FiniteSource<String>> operator =
        new StreamSource<>(new FiniteSource<String>());

    final List<StreamElement> output = new ArrayList<>();

    setupSourceOperator(operator, TimeCharacteristic.EventTime, 0);
    OperatorChain<?, ?> operatorChain = createOperatorChain(operator);
    try {
        operator.run(new Object(), mock(StreamStatusMaintainer.class), new CollectorOutput<String>(output), operatorChain);
    } finally {
        operatorChain.releaseOutputs();
    }

    assertEquals(1, output.size());
    assertEquals(Watermark.MAX_WATERMARK, output.get(0));
}
Example #7
Source File: StreamSourceOperatorLatencyMetricsTest.java From flink with Apache License 2.0
private static <T> void setupSourceOperator(
        StreamSource<T, ?> operator,
        ExecutionConfig executionConfig,
        Environment env,
        ProcessingTimeService timeProvider) {

    StreamConfig cfg = new StreamConfig(new Configuration());
    cfg.setStateBackend(new MemoryStateBackend());

    cfg.setTimeCharacteristic(TimeCharacteristic.EventTime);
    cfg.setOperatorID(new OperatorID());

    try {
        MockStreamTask mockTask = new MockStreamTaskBuilder(env)
            .setConfig(cfg)
            .setExecutionConfig(executionConfig)
            .setProcessingTimeService(timeProvider)
            .build();

        operator.setup(mockTask, cfg, (Output<StreamRecord<T>>) mock(Output.class));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #8
Source File: SourceTaskTerminationTest.java From flink with Apache License 2.0
private StreamTaskTestHarness<Long> getSourceStreamTaskTestHarness() {
    final StreamTaskTestHarness<Long> testHarness = new StreamTaskTestHarness<>(
        SourceStreamTask::new, BasicTypeInfo.LONG_TYPE_INFO);

    final LockStepSourceWithOneWmPerElement source = new LockStepSourceWithOneWmPerElement();

    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.getExecutionConfig().setLatencyTrackingInterval(-1);

    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamSource<Long, ?> sourceOperator = new StreamSource<>(source);
    streamConfig.setStreamOperator(sourceOperator);
    streamConfig.setOperatorID(new OperatorID());
    return testHarness;
}
Example #9
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@Test
public void testMaxWatermarkIsForwardedLastForFiniteSource() throws Exception {
    StreamSource<String, ?> sourceOperator = new StreamSource<>(new FiniteSource(true));
    StreamTaskTestHarness<String> testHarness =
        setupSourceStreamTask(sourceOperator, BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.invoke();
    testHarness.waitForTaskCompletion();

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    expectedOutput.add(new StreamRecord<>("Hello"));
    expectedOutput.add(Watermark.MAX_WATERMARK);

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
Example #10
Source File: SourceStreamTaskTest.java From flink with Apache License 2.0
/**
 * If finishing a task doesn't swallow exceptions this test would fail with an exception.
 */
@Test
public void finishingIgnoresExceptions() throws Exception {
    final StreamTaskTestHarness<String> testHarness = new StreamTaskTestHarness<>(
        SourceStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO);

    final CompletableFuture<Void> operatorRunningWaitingFuture = new CompletableFuture<>();
    ExceptionThrowingSource.setIsInRunLoopFuture(operatorRunningWaitingFuture);

    testHarness.setupOutputForSingletonOperatorChain();
    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(new StreamSource<>(new ExceptionThrowingSource()));
    streamConfig.setOperatorID(new OperatorID());

    testHarness.invoke();
    operatorRunningWaitingFuture.get();
    testHarness.getTask().finishTask();

    testHarness.waitForTaskCompletion();
}
Example #11
Source File: StreamExecutionEnvironment.java From flink with Apache License 2.0
/**
 * Adds a data source with a custom type information, thus opening a
 * {@link DataStream}. Only in very special cases does the user need to
 * support type information. Otherwise use
 * {@link #addSource(org.apache.flink.streaming.api.functions.source.SourceFunction)}
 *
 * @param function
 *         the user defined function
 * @param sourceName
 *         Name of the data source
 * @param <OUT>
 *         type of the returned stream
 * @param typeInfo
 *         the user defined type information for the stream
 * @return the data stream constructed
 */
@SuppressWarnings("unchecked")
public <OUT> DataStreamSource<OUT> addSource(SourceFunction<OUT> function, String sourceName, TypeInformation<OUT> typeInfo) {
    if (function instanceof ResultTypeQueryable) {
        typeInfo = ((ResultTypeQueryable<OUT>) function).getProducedType();
    }
    if (typeInfo == null) {
        try {
            typeInfo = TypeExtractor.createTypeInfo(
                SourceFunction.class, function.getClass(), 0, null, null);
        } catch (final InvalidTypesException e) {
            typeInfo = (TypeInformation<OUT>) new MissingTypeInfo(sourceName, e);
        }
    }

    boolean isParallel = function instanceof ParallelSourceFunction;

    clean(function);

    final StreamSource<OUT, ?> sourceOperator = new StreamSource<>(function);
    return new DataStreamSource<>(this, typeInfo, sourceOperator, isParallel, sourceName);
}
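For context, a hedged usage sketch of the overload above (the anonymous source is illustrative, not from the Flink code base): supplying the type information explicitly is useful when the type extractor cannot infer the output type on its own.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

// Pass the type information explicitly instead of relying on extraction.
DataStreamSource<String> stream = env.addSource(
    new SourceFunction<String>() {
        @Override
        public void run(SourceContext<String> ctx) {
            ctx.collect("hello");
        }

        @Override
        public void cancel() {
        }
    },
    "my-source",
    BasicTypeInfo.STRING_TYPE_INFO);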
Example #12
Source File: InterruptSensitiveRestoreTest.java From Flink-CEPplus with Apache License 2.0
private void testRestoreWithInterrupt(int mode) throws Exception {
    IN_RESTORE_LATCH.reset();
    Configuration taskConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(taskConfig);
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    switch (mode) {
        case OPERATOR_MANAGED:
        case OPERATOR_RAW:
        case KEYED_MANAGED:
        case KEYED_RAW:
            cfg.setStateKeySerializer(IntSerializer.INSTANCE);
            cfg.setStreamOperator(new StreamSource<>(new TestSource(mode)));
            break;
        default:
            throw new IllegalArgumentException();
    }

    StreamStateHandle lockingHandle = new InterruptLockingStateHandle();

    Task task = createTask(cfg, taskConfig, lockingHandle, mode);

    // start the task and wait until it is in "restore"
    task.startTaskThread();
    IN_RESTORE_LATCH.await();

    // trigger cancellation and signal to continue
    task.cancelExecution();

    task.getExecutingThread().join(30000);

    if (task.getExecutionState() == ExecutionState.CANCELING) {
        fail("Task is stuck and not canceling");
    }

    assertEquals(ExecutionState.CANCELED, task.getExecutionState());
    assertNull(task.getFailureCause());
}
Example #13
Source File: FlinkKafkaConsumerBaseTest.java From Flink-CEPplus with Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
        new AbstractStreamOperatorTestHarness<>(
            new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
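A sketch of how a helper like this is typically driven (hypothetical call site; mySourceFunction stands in for whatever SourceFunction is under test), following the setup/open/close sequence seen in the Kafka examples below:

AbstractStreamOperatorTestHarness<String> harness =
    createTestHarness(mySourceFunction, 2, 0);

harness.setup();
harness.open();
// ... exercise the source, snapshot and restore state, assert on offsets ...
harness.close();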
Example #14
Source File: StreamSources.java From beam with Apache License 2.0
public static <OutT, SrcT extends SourceFunction<OutT>> void run(
        StreamSource<OutT, SrcT> streamSource,
        Object lockingObject,
        StreamStatusMaintainer streamStatusMaintainer,
        Output<StreamRecord<OutT>> collector) throws Exception {
    streamSource.run(
        lockingObject, streamStatusMaintainer, collector, createOperatorChain(streamSource));
}
Example #15
Source File: DataStreamSource.java From Flink-CEPplus with Apache License 2.0
public DataStreamSource(
        StreamExecutionEnvironment environment,
        TypeInformation<T> outTypeInfo,
        StreamSource<T, ?> operator,
        boolean isParallel,
        String sourceName) {
    super(environment, new SourceTransformation<>(sourceName, operator, outTypeInfo, environment.getParallelism()));

    this.isParallel = isParallel;
    if (!isParallel) {
        setParallelism(1);
    }
}
Example #16
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(
            Collections.singletonList("dummy-topic"),
            Collections.<KafkaTopicPartition>emptyList(),
            FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // assert that no partitions were found and the subscribed partition set is empty
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // assert that no state was restored
    assertTrue(consumerFunction.getRestoredState().isEmpty());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example #17
Source File: DataStreamSource.java From flink with Apache License 2.0
/**
 * The constructor used to create legacy sources.
 */
public DataStreamSource(
        StreamExecutionEnvironment environment,
        TypeInformation<T> outTypeInfo,
        StreamSource<T, ?> operator,
        boolean isParallel,
        String sourceName) {
    super(environment, new LegacySourceTransformation<>(sourceName, operator, outTypeInfo, environment.getParallelism()));

    this.isParallel = isParallel;
    if (!isParallel) {
        setParallelism(1);
    }
}
Example #18
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@SuppressWarnings("unchecked") private static <T> MockStreamTask setupSourceOperator( StreamSource<T, ?> operator, TimeCharacteristic timeChar, long watermarkInterval, final TimerService timeProvider) throws Exception { ExecutionConfig executionConfig = new ExecutionConfig(); executionConfig.setAutoWatermarkInterval(watermarkInterval); StreamConfig cfg = new StreamConfig(new Configuration()); cfg.setStateBackend(new MemoryStateBackend()); cfg.setTimeCharacteristic(timeChar); cfg.setOperatorID(new OperatorID()); Environment env = new DummyEnvironment("MockTwoInputTask", 1, 0); StreamStatusMaintainer streamStatusMaintainer = mock(StreamStatusMaintainer.class); when(streamStatusMaintainer.getStreamStatus()).thenReturn(StreamStatus.ACTIVE); MockStreamTask mockTask = new MockStreamTaskBuilder(env) .setConfig(cfg) .setExecutionConfig(executionConfig) .setStreamStatusMaintainer(streamStatusMaintainer) .setTimerService(timeProvider) .build(); operator.setup(mockTask, cfg, (Output<StreamRecord<T>>) mock(Output.class)); return mockTask; }
Example #19
Source File: RMQSourceTest.java From flink with Apache License 2.0
@Test
public void testOpen() throws Exception {
    MockDeserializationSchema<String> deserializationSchema = new MockDeserializationSchema<>();

    RMQSource<String> consumer = new RMQTestSource(deserializationSchema);
    AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(new StreamSource<>(consumer), 1, 1, 0);

    testHarness.open();
    assertThat("Open method was not called", deserializationSchema.isOpenCalled(), is(true));
}
Example #20
Source File: FlinkKafkaConsumerBaseMigrationTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(
            Collections.singletonList("dummy-topic"),
            Collections.<KafkaTopicPartition>emptyList(),
            FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));
    testHarness.open();

    // assert that no partitions were found and the subscribed partition set is empty
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // assert that no state was restored
    assertTrue(consumerFunction.getRestoredState().isEmpty());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example #21
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
@Test
public void testAutomaticWatermarkContext() throws Exception {

    // regular stream source operator
    final StreamSource<String, InfiniteSource<String>> operator =
        new StreamSource<>(new InfiniteSource<>());

    long watermarkInterval = 10;
    TestProcessingTimeService processingTimeService = new TestProcessingTimeService();
    processingTimeService.setCurrentTime(0);

    setupSourceOperator(operator, TimeCharacteristic.IngestionTime, watermarkInterval, processingTimeService);

    final List<StreamElement> output = new ArrayList<>();

    StreamSourceContexts.getSourceContext(TimeCharacteristic.IngestionTime,
        operator.getContainingTask().getProcessingTimeService(),
        operator.getContainingTask().getCheckpointLock(),
        operator.getContainingTask().getStreamStatusMaintainer(),
        new CollectorOutput<String>(output),
        operator.getExecutionConfig().getAutoWatermarkInterval(),
        -1);

    // periodically emit the watermarks;
    // even though we start from 1, the watermarks are still
    // going to be aligned with the watermark interval
    for (long i = 1; i < 100; i += watermarkInterval) {
        processingTimeService.setCurrentTime(i);
    }

    assertTrue(output.size() == 9);

    long nextWatermark = 0;
    for (StreamElement el : output) {
        nextWatermark += watermarkInterval;
        Watermark wm = (Watermark) el;
        assertTrue(wm.getTimestamp() == nextWatermark);
    }
}
Example #22
Source File: StreamSourceOperatorWatermarksTest.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked") private static <T> void setupSourceOperator(StreamSource<T, ?> operator, TimeCharacteristic timeChar, long watermarkInterval, final ProcessingTimeService timeProvider) throws Exception { ExecutionConfig executionConfig = new ExecutionConfig(); executionConfig.setAutoWatermarkInterval(watermarkInterval); StreamConfig cfg = new StreamConfig(new Configuration()); cfg.setStateBackend(new MemoryStateBackend()); cfg.setTimeCharacteristic(timeChar); cfg.setOperatorID(new OperatorID()); Environment env = new DummyEnvironment("MockTwoInputTask", 1, 0); StreamStatusMaintainer streamStatusMaintainer = mock(StreamStatusMaintainer.class); when(streamStatusMaintainer.getStreamStatus()).thenReturn(StreamStatus.ACTIVE); MockStreamTask mockTask = new MockStreamTaskBuilder(env) .setConfig(cfg) .setExecutionConfig(executionConfig) .setStreamStatusMaintainer(streamStatusMaintainer) .setProcessingTimeService(timeProvider) .build(); operator.setup(mockTask, cfg, (Output<StreamRecord<T>>) mock(Output.class)); }
Example #23
Source File: FlinkKafkaConsumerBaseMigrationTest.java From flink with Apache License 2.0
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
        new DummyFlinkKafkaConsumer<>(TOPICS, partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
        new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
        new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
        OperatorSnapshotUtil.getResourceFilename(
            "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));
    testHarness.open();

    // assert that there are partitions and that they are identical to the expected list
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
    assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

    // assert that state is correctly restored from legacy checkpoint
    assertTrue(consumerFunction.getRestoredState() != null);
    assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

    consumerOperator.close();
    consumerOperator.cancel();
}
Example #24
Source File: FlinkKafkaConsumerBaseTest.java From flink with Apache License 2.0
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
        SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

    AbstractStreamOperatorTestHarness<T> testHarness =
        new AbstractStreamOperatorTestHarness<>(
            new StreamSource<>(source), maxParallelism, numSubtasks, subtaskIndex);

    testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}
Example #25
Source File: DataStreamSource.java From flink with Apache License 2.0
public DataStreamSource(
        StreamExecutionEnvironment environment,
        TypeInformation<T> outTypeInfo,
        StreamSource<T, ?> operator,
        boolean isParallel,
        String sourceName) {
    super(environment, new SourceTransformation<>(sourceName, operator, outTypeInfo, environment.getParallelism()));

    this.isParallel = isParallel;
    if (!isParallel) {
        setParallelism(1);
    }
}
Example #26
Source File: InterruptSensitiveRestoreTest.java From flink with Apache License 2.0
private void testRestoreWithInterrupt(int mode) throws Exception {
    IN_RESTORE_LATCH.reset();
    Configuration taskConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(taskConfig);
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    switch (mode) {
        case OPERATOR_MANAGED:
        case OPERATOR_RAW:
        case KEYED_MANAGED:
        case KEYED_RAW:
            cfg.setStateKeySerializer(IntSerializer.INSTANCE);
            cfg.setStreamOperator(new StreamSource<>(new TestSource(mode)));
            break;
        default:
            throw new IllegalArgumentException();
    }

    StreamStateHandle lockingHandle = new InterruptLockingStateHandle();

    Task task = createTask(cfg, taskConfig, lockingHandle, mode);

    // start the task and wait until it is in "restore"
    task.startTaskThread();
    IN_RESTORE_LATCH.await();

    // trigger cancellation and signal to continue
    task.cancelExecution();

    task.getExecutingThread().join(30000);

    if (task.getExecutionState() == ExecutionState.CANCELING) {
        fail("Task is stuck and not canceling");
    }

    assertEquals(ExecutionState.CANCELED, task.getExecutionState());
    assertNull(task.getFailureCause());
}
Example #27
Source File: StreamSourceOperatorWatermarksTest.java From flink with Apache License 2.0
private static <T> StreamTaskTestHarness<T> setupSourceStreamTask(
        StreamSource<T, ?> sourceOperator,
        TypeInformation<T> outputType,
        final boolean cancelImmediatelyAfterCreation) {

    final StreamTaskTestHarness<T> testHarness = new StreamTaskTestHarness<>(
        (env) -> {
            SourceStreamTask<T, ?, ?> sourceTask = new SourceStreamTask<>(env);
            if (cancelImmediatelyAfterCreation) {
                try {
                    sourceTask.cancel();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
            return sourceTask;
        },
        outputType);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    streamConfig.setStreamOperator(sourceOperator);
    streamConfig.setOperatorID(new OperatorID());
    streamConfig.setTimeCharacteristic(TimeCharacteristic.EventTime);

    return testHarness;
}