Java Code Examples for org.apache.flink.streaming.api.functions.sink.DiscardingSink
The following examples show how to use
org.apache.flink.streaming.api.functions.sink.DiscardingSink. They are extracted from open source projects;
the source project, file, and license are listed above each example.
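DiscardingSink is a SinkFunction whose invoke method does nothing, so every record it receives is silently dropped. Flink's tests and sample jobs use it to cap a pipeline whose output is irrelevant, so the topology can be executed or turned into a StreamGraph/JobGraph without writing results anywhere. Below is a minimal sketch of the pattern; the class name and job name are illustrative, not taken from any of the examples on this page.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;

public class DiscardingSinkSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // DiscardingSink caps the pipeline; every record delivered to it is dropped.
        env.fromElements(1, 2, 3)
                .map(value -> value * 2)
                .addSink(new DiscardingSink<>());

        env.execute("discarding-sink-sketch");
    }
}

The same shape recurs in every example that follows: build the topology, attach a DiscardingSink, then either execute the job or inspect the generated StreamGraph/JobGraph.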
Example 1
Source Project: Flink-CEPplus | Source File: TimestampITCase.java | License: Apache License 2.0

/**
 * These check whether timestamps are properly assigned at the sources and handled in
 * network transmission and between chained operators when timestamps are enabled.
 */
@Test
public void testTimestampHandling() throws Exception {
    final int numElements = 10;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    env.setParallelism(PARALLELISM);
    env.getConfig().disableSysoutLogging();

    DataStream<Integer> source1 = env.addSource(new MyTimestampSource(0L, numElements));
    DataStream<Integer> source2 = env.addSource(new MyTimestampSource(0L, numElements));

    source1
            .map(new IdentityMap())
            .connect(source2).map(new IdentityCoMap())
            .transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new TimestampCheckingOperator())
            .addSink(new DiscardingSink<Integer>());

    env.execute();
}
Example 2
Source Project: Flink-CEPplus | Source File: TimestampITCase.java | License: Apache License 2.0

/**
 * These check whether timestamps are properly ignored when they are disabled.
 */
@Test
public void testDisabledTimestamps() throws Exception {
    final int numElements = 10;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    env.setParallelism(PARALLELISM);
    env.getConfig().disableSysoutLogging();

    DataStream<Integer> source1 = env.addSource(new MyNonWatermarkingSource(numElements));
    DataStream<Integer> source2 = env.addSource(new MyNonWatermarkingSource(numElements));

    source1
            .map(new IdentityMap())
            .connect(source2).map(new IdentityCoMap())
            .transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new DisabledTimestampCheckingOperator())
            .addSink(new DiscardingSink<Integer>());

    env.execute();
}
Example 3
Source Project: Flink-CEPplus | Source File: StatefulStreamingJob.java | License: Apache License 2.0

public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    final String checkpointDir = pt.getRequired("checkpoint.dir");

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(new FsStateBackend(checkpointDir));
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.enableCheckpointing(1000L);
    env.getConfig().disableGenericTypes();

    env.addSource(new MySource()).uid("my-source")
            .keyBy(anInt -> 0)
            .map(new MyStatefulFunction()).uid("my-map")
            .addSink(new DiscardingSink<>()).uid("my-sink");

    env.execute();
}
Example 4
Source Project: flink | Source File: CheckpointExceptionHandlerConfigurationTest.java | License: Apache License 2.0

public void doTestPropagationFromCheckpointConfig(boolean failTaskOnCheckpointErrors) {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    streamExecutionEnvironment.setParallelism(1);
    streamExecutionEnvironment.getCheckpointConfig().setCheckpointInterval(1000);
    streamExecutionEnvironment.getCheckpointConfig().setFailOnCheckpointingErrors(failTaskOnCheckpointErrors);
    streamExecutionEnvironment.addSource(new SourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) {
        }

        @Override
        public void cancel() {
        }

    }).addSink(new DiscardingSink<>());
}
Example 5
Source Project: Flink-CEPplus | Source File: StreamGraphGeneratorTest.java | License: Apache License 2.0

/**
 * Test whether an {@link OutputTypeConfigurable} implementation gets called with the correct
 * output type. In this test case the output type must be BasicTypeInfo.INT_TYPE_INFO.
 */
@Test
public void testOutputTypeConfigurationWithOneInputTransformation() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Integer> source = env.fromElements(1, 10);

    OutputTypeConfigurableOperationWithOneInput outputTypeConfigurableOperation =
            new OutputTypeConfigurableOperationWithOneInput();

    DataStream<Integer> result = source.transform(
            "Single input and output type configurable operation",
            BasicTypeInfo.INT_TYPE_INFO,
            outputTypeConfigurableOperation);

    result.addSink(new DiscardingSink<>());

    env.getStreamGraph();

    assertEquals(BasicTypeInfo.INT_TYPE_INFO, outputTypeConfigurableOperation.getTypeInformation());
}
Example 6
Source Project: Flink-CEPplus | Source File: StreamGraphGeneratorTest.java | License: Apache License 2.0

/**
 * Tests that the max parallelism is properly set for connected streams.
 */
@Test
public void testMaxParallelismWithConnectedKeyedStream() {
    int maxParallelism = 42;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> input1 = env.fromElements(1, 2, 3, 4).setMaxParallelism(128);
    DataStream<Integer> input2 = env.fromElements(1, 2, 3, 4).setMaxParallelism(129);

    env.getConfig().setMaxParallelism(maxParallelism);

    DataStream<Integer> keyedResult = input1
            .connect(input2)
            .keyBy(value -> value, value -> value)
            .map(new NoOpIntCoMap());

    keyedResult.addSink(new DiscardingSink<>());

    StreamGraph graph = env.getStreamGraph();

    StreamNode keyedResultNode = graph.getStreamNode(keyedResult.getId());

    StreamPartitioner<?> streamPartitioner1 = keyedResultNode.getInEdges().get(0).getPartitioner();
    StreamPartitioner<?> streamPartitioner2 = keyedResultNode.getInEdges().get(1).getPartitioner();
}
Example 7
Source Project: Flink-CEPplus | Source File: CheckpointExceptionHandlerConfigurationTest.java | License: Apache License 2.0

public void doTestPropagationFromCheckpointConfig(boolean failTaskOnCheckpointErrors) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    streamExecutionEnvironment.setParallelism(1);
    streamExecutionEnvironment.getCheckpointConfig().setCheckpointInterval(1000);
    streamExecutionEnvironment.getCheckpointConfig().setFailOnCheckpointingErrors(failTaskOnCheckpointErrors);
    streamExecutionEnvironment.addSource(new SourceFunction<Integer>() {

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }

    }).addSink(new DiscardingSink<>());

    StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    SerializedValue<ExecutionConfig> serializedExecutionConfig = jobGraph.getSerializedExecutionConfig();
    ExecutionConfig executionConfig =
            serializedExecutionConfig.deserializeValue(Thread.currentThread().getContextClassLoader());

    Assert.assertEquals(failTaskOnCheckpointErrors, executionConfig.isFailTaskOnCheckpointError());
}
Example 8
Source Project: flink | Source File: StreamingJobGraphGeneratorNodeHashTest.java | License: Apache License 2.0

/**
 * Tests that there are no collisions with two identical intermediate nodes connected to the
 * same predecessor.
 *
 * <pre>
 *             /-> [ (map) ] -> [ (sink) ]
 * [ (src) ] -+
 *             \-> [ (map) ] -> [ (sink) ]
 * </pre>
 */
@Test
public void testNodeHashIdenticalNodes() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.disableOperatorChaining();

    DataStream<String> src = env.addSource(new NoOpSourceFunction());

    src.map(new NoOpMapFunction()).addSink(new DiscardingSink<>());

    src.map(new NoOpMapFunction()).addSink(new DiscardingSink<>());

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    Set<JobVertexID> vertexIds = new HashSet<>();
    for (JobVertex vertex : jobGraph.getVertices()) {
        assertTrue(vertexIds.add(vertex.getID()));
    }
}
Example 9
Source Project: flink | Source File: SavepointITCase.java | License: Apache License 2.0

/**
 * Creates a streaming JobGraph from the StreamEnvironment.
 */
private JobGraph createJobGraph(
        int parallelism,
        int numberOfRetries,
        long restartDelay) {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.disableOperatorChaining();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(numberOfRetries, restartDelay));
    env.getConfig().disableSysoutLogging();

    DataStream<Integer> stream = env
            .addSource(new InfiniteTestSource())
            .shuffle()
            .map(new StatefulCounter());

    stream.addSink(new DiscardingSink<>());

    return env.getStreamGraph().getJobGraph();
}
Example 10
Source Project: flink | Source File: ContinuousFileReaderOperatorITCase.java | License: Apache License 2.0

@Test
public void testEndInput() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    final File sourceFile = TEMPORARY_FOLDER.newFile();
    final int elementCount = 10000;
    try (PrintWriter printWriter = new PrintWriter(sourceFile)) {
        for (int i = 0; i < elementCount; i++) {
            printWriter.println(i);
        }
    }

    DataStreamSource<String> source = env.readTextFile(sourceFile.getAbsolutePath());

    // check the endInput is invoked at the right time
    TestBoundedOneInputStreamOperator checkingOperator = new TestBoundedOneInputStreamOperator(elementCount);
    DataStream<String> endInputChecking = source.transform("EndInputChecking", STRING_TYPE_INFO, checkingOperator);

    endInputChecking.addSink(new DiscardingSink<>());

    env.execute("ContinuousFileReaderOperatorITCase.testEndInput");
}
Example 11
Source Project: flink | Source File: OperatorIDGeneratorTest.java | License: Apache License 2.0

private static OperatorID getOperatorID() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    env
            .fromElements(1, 2, 3)
            .uid(UID).name(OPERATOR_NAME)
            .disableChaining()
            .addSink(new DiscardingSink<>());

    JobGraph graph = env.getStreamGraph().getJobGraph(new JobID());
    JobVertex vertex = StreamSupport.stream(graph.getVertices().spliterator(), false)
            .filter(node -> node.getName().contains(OPERATOR_NAME))
            .findFirst()
            .orElseThrow(() -> new IllegalStateException("Unable to find vertex"));

    return vertex.getOperatorIDs().get(0);
}
Example 12
Source Project: flink | Source File: AsyncWaitOperatorTest.java | License: Apache License 2.0

/**
 * Test for the temporary fix to FLINK-13063.
 */
@Test
public void testAsyncOperatorIsNeverChained() {
    StreamExecutionEnvironment chainEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> input = chainEnv.fromElements(1);
    input = AsyncDataStream
            .orderedWait(input, new LazyAsyncFunction(), TIMEOUT, TimeUnit.MILLISECONDS, 6)
            .map((x) -> x);
    AsyncDataStream
            .unorderedWait(input, new MyAsyncFunction(), TIMEOUT, TimeUnit.MILLISECONDS, 3)
            .map((x) -> x)
            .addSink(new DiscardingSink<>());

    final JobGraph jobGraph = chainEnv.getStreamGraph().getJobGraph();

    Assert.assertEquals(3, jobGraph.getVerticesSortedTopologicallyFromSources().size());
}
Example 13
Source Project: flink | Source File: StreamingJobGraphGeneratorNodeHashTest.java | License: Apache License 2.0

/**
 * Tests that there are no collisions with two identical sources.
 *
 * <pre>
 * [ (src0) ] --\
 *               +--> [ (sink) ]
 * [ (src1) ] --/
 * </pre>
 */
@Test
public void testNodeHashIdenticalSources() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.disableOperatorChaining();

    DataStream<String> src0 = env.addSource(new NoOpSourceFunction());
    DataStream<String> src1 = env.addSource(new NoOpSourceFunction());

    src0.union(src1).addSink(new DiscardingSink<>());

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertTrue(vertices.get(0).isInputVertex());
    assertTrue(vertices.get(1).isInputVertex());

    assertNotNull(vertices.get(0).getID());
    assertNotNull(vertices.get(1).getID());

    assertNotEquals(vertices.get(0).getID(), vertices.get(1).getID());
}
Example 14
Source Project: Flink-CEPplus | Source File: KafkaShortRetentionTestBase.java | License: Apache License 2.0

/**
 * Ensure that the consumer is properly failing if "auto.offset.reset" is set to "none".
 */
public void runFailOnAutoOffsetResetNone() throws Exception {
    final String topic = "auto-offset-reset-none-test";
    final int parallelism = 1;

    kafkaServer.createTestTopic(topic, parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
    env.getConfig().disableSysoutLogging();

    // ----------- add consumer ----------

    Properties customProps = new Properties();
    customProps.putAll(standardProps);
    customProps.putAll(secureProps);
    customProps.setProperty("auto.offset.reset", "none"); // test that "none" leads to an exception
    FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);

    DataStreamSource<String> consuming = env.addSource(source);
    consuming.addSink(new DiscardingSink<String>());

    try {
        env.execute("Test auto offset reset none");
    } catch (Throwable e) {
        // check if correct exception has been thrown
        if (!e.getCause().getCause().getMessage().contains("Unable to find previous offset")  // kafka 0.8
                && !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition")  // kafka 0.9
        ) {
            throw e;
        }
    }

    kafkaServer.deleteTestTopic(topic);
}
Example 15
Source Project: Flink-CEPplus | Source File: RescalingITCase.java | License: Apache License 2.0

private static JobGraph createJobGraphWithOperatorState(
        int parallelism, int maxParallelism, OperatorCheckpointMethod checkpointMethod) {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.getConfig().setMaxParallelism(maxParallelism);
    env.enableCheckpointing(Long.MAX_VALUE);
    env.setRestartStrategy(RestartStrategies.noRestart());

    StateSourceBase.workStartedLatch = new CountDownLatch(parallelism);

    SourceFunction<Integer> src;

    switch (checkpointMethod) {
        case CHECKPOINTED_FUNCTION:
            src = new PartitionedStateSource(false);
            break;
        case CHECKPOINTED_FUNCTION_BROADCAST:
            src = new PartitionedStateSource(true);
            break;
        case LIST_CHECKPOINTED:
            src = new PartitionedStateSourceListCheckpointed();
            break;
        case NON_PARTITIONED:
            src = new NonPartitionedStateSource();
            break;
        default:
            throw new IllegalArgumentException();
    }

    DataStream<Integer> input = env.addSource(src);

    input.addSink(new DiscardingSink<Integer>());

    return env.getStreamGraph().getJobGraph();
}
Example 16
Source Project: flink | Source File: StreamGraphGeneratorTest.java | License: Apache License 2.0

/**
 * Tests that the max parallelism is automatically set to the parallelism if it has not been
 * specified.
 */
@Test
public void testAutoMaxParallelism() {
    int globalParallelism = 42;
    int mapParallelism = 17;
    int maxParallelism = 21;
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(globalParallelism);

    DataStream<Integer> source = env.fromElements(1, 2, 3);

    DataStream<Integer> keyedResult1 = source.keyBy(value -> value).map(new NoOpIntMap());

    DataStream<Integer> keyedResult2 = keyedResult1
            .keyBy(value -> value)
            .map(new NoOpIntMap())
            .setParallelism(mapParallelism);

    DataStream<Integer> keyedResult3 = keyedResult2
            .keyBy(value -> value)
            .map(new NoOpIntMap())
            .setMaxParallelism(maxParallelism);

    DataStream<Integer> keyedResult4 = keyedResult3
            .keyBy(value -> value)
            .map(new NoOpIntMap())
            .setMaxParallelism(maxParallelism)
            .setParallelism(mapParallelism);

    keyedResult4.addSink(new DiscardingSink<>());

    StreamGraph graph = env.getStreamGraph();

    StreamNode keyedResult3Node = graph.getStreamNode(keyedResult3.getId());
    StreamNode keyedResult4Node = graph.getStreamNode(keyedResult4.getId());

    assertEquals(maxParallelism, keyedResult3Node.getMaxParallelism());
    assertEquals(maxParallelism, keyedResult4Node.getMaxParallelism());
}
Example 17
Source Project: flink | Source File: YarnTestJob.java | License: Apache License 2.0

public static JobGraph stoppableJob(final StopJobSignal stopJobSignal) {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    env.addSource(new InfiniteSourceFunction(stopJobSignal))
            .setParallelism(2)
            .shuffle()
            .addSink(new DiscardingSink<>())
            .setParallelism(2);

    return env.getStreamGraph().getJobGraph();
}
Example 18
Source Project: Flink-CEPplus | Source File: ChainedRuntimeContextITCase.java | License: Apache License 2.0

@Test
public void test() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    env.addSource(new TestSource()).map(new TestMap()).addSink(new DiscardingSink<Integer>());
    env.execute();

    assertNotEquals(srcContext, mapContext);
}
Example 19
Source Project: Flink-CEPplus | Source File: TestJob.java | License: Apache License 2.0

public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStreamSource<Integer> source = env.fromElements(1, 2, 3, 4);
    final SingleOutputStreamOperator<Integer> mapper = source.map(element -> 2 * element);
    mapper.addSink(new DiscardingSink<>());

    ParameterTool parameterTool = ParameterTool.fromArgs(args);
    env.execute(TestJob.class.getCanonicalName() + "-" + parameterTool.getRequired("arg"));
}
Example 20
Source Project: Flink-CEPplus | Source File: DistributedCacheDfsTest.java | License: Apache License 2.0

@Test
public void testDistributeFileViaDFS() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    env.registerCachedFile(testFile.toString(), "test_data", false);
    env.registerCachedFile(testDir.toString(), "test_dir", false);

    env.fromElements(1)
            .map(new TestMapFunction())
            .addSink(new DiscardingSink<>());

    env.execute("Distributed Cache Via Blob Test Program");
}
Example 21
Source Project: Flink-CEPplus | Source File: StreamExecutionEnvironmentTest.java | License: Apache License 2.0

@Test
public void testSources() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SourceFunction<Integer> srcFun = new SourceFunction<Integer>() {
        private static final long serialVersionUID = 1L;

        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    };
    DataStreamSource<Integer> src1 = env.addSource(srcFun);
    src1.addSink(new DiscardingSink<Integer>());
    assertEquals(srcFun, getFunctionFromDataSource(src1));

    List<Long> list = Arrays.asList(0L, 1L, 2L);

    DataStreamSource<Long> src2 = env.generateSequence(0, 2);
    assertTrue(getFunctionFromDataSource(src2) instanceof StatefulSequenceSource);

    DataStreamSource<Long> src3 = env.fromElements(0L, 1L, 2L);
    assertTrue(getFunctionFromDataSource(src3) instanceof FromElementsFunction);

    DataStreamSource<Long> src4 = env.fromCollection(list);
    assertTrue(getFunctionFromDataSource(src4) instanceof FromElementsFunction);
}
Example 22
Source Project: Flink-CEPplus | Source File: StreamExecutionEnvironmentTest.java | License: Apache License 2.0

@SuppressWarnings("unchecked")
private static <T> SourceFunction<T> getFunctionFromDataSource(DataStreamSource<T> dataStreamSource) {
    dataStreamSource.addSink(new DiscardingSink<T>());
    AbstractUdfStreamOperator<?, ?> operator =
            (AbstractUdfStreamOperator<?, ?>) getOperatorFromDataStream(dataStreamSource);
    return (SourceFunction<T>) operator.getUserFunction();
}
Example 23
Source Project: flink | Source File: StreamingJobGraphGeneratorNodeHashTest.java | License: Apache License 2.0

/**
 * Tests that a manual hash for an intermediate chain node is accepted.
 */
@Test
public void testManualHashAssignmentForIntermediateNodeInChain() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);

    env.addSource(new NoOpSourceFunction())
            // Intermediate chained node
            .map(new NoOpMapFunction()).uid("map")
            .addSink(new DiscardingSink<>());

    env.getStreamGraph().getJobGraph();
}
Example 24
Source Project: Flink-CEPplus | Source File: StreamGraphGeneratorTest.java | License: Apache License 2.0

/**
 * Tests that the global and operator-wide max parallelism setting is respected.
 */
@Test
public void testMaxParallelismForwarding() {
    int globalMaxParallelism = 42;
    int keyedResult2MaxParallelism = 17;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setMaxParallelism(globalMaxParallelism);

    DataStream<Integer> source = env.fromElements(1, 2, 3);

    DataStream<Integer> keyedResult1 = source.keyBy(value -> value).map(new NoOpIntMap());

    DataStream<Integer> keyedResult2 = keyedResult1
            .keyBy(value -> value)
            .map(new NoOpIntMap())
            .setMaxParallelism(keyedResult2MaxParallelism);

    keyedResult2.addSink(new DiscardingSink<>());

    StreamGraph graph = env.getStreamGraph();

    StreamNode keyedResult1Node = graph.getStreamNode(keyedResult1.getId());
    StreamNode keyedResult2Node = graph.getStreamNode(keyedResult2.getId());

    assertEquals(globalMaxParallelism, keyedResult1Node.getMaxParallelism());
    assertEquals(keyedResult2MaxParallelism, keyedResult2Node.getMaxParallelism());
}