Java Code Examples for org.apache.flink.runtime.jobgraph.JobGraph#getVerticesSortedTopologicallyFromSources()

The following examples show how to use org.apache.flink.runtime.jobgraph.JobGraph#getVerticesSortedTopologicallyFromSources(). They are taken from open source projects; the source file, originating project, and license are noted above each example.
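Before the project examples, here is a minimal, self-contained sketch of how the method is typically reached from a DataStream program: build a pipeline, translate it into a JobGraph, and read the job vertices back in source-first topological order. The sketch is not taken from any of the projects below; the class name and pipeline are illustrative, and it assumes the same Flink 1.9–1.11 era DataStream API used by the examples on this page.

import java.util.List;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SortedVerticesSketch {

	public static void main(String[] args) {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(2);

		// fromElements -> Map -> Print
		env.fromElements(1, 2, 3)
			.map(new MapFunction<Integer, Integer>() {
				@Override
				public Integer map(Integer value) {
					return value * 2;
				}
			})
			.print();

		// translate the pipeline into a JobGraph, as the tests below do
		JobGraph jobGraph = env.getStreamGraph().getJobGraph();

		// sources come first, downstream vertices follow in topological order
		List<JobVertex> sortedVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
		for (JobVertex vertex : sortedVertices) {
			System.out.println(vertex.getID() + " -> " + vertex.getName());
		}
	}
}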
Example 1
Source File: PythonScalarFunctionOperatorTestBase.java    From flink with Apache License 2.0
@Test
public void testPythonScalarFunctionOperatorIsChainedByDefault() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	StreamTableEnvironment tEnv = createTableEnvironment(env);
	tEnv.getConfig().getConfiguration().setString(
		TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key(), "80mb");
	tEnv.registerFunction("pyFunc", new PythonScalarFunction("pyFunc"));
	DataStream<Tuple2<Integer, Integer>> ds = env.fromElements(new Tuple2<>(1, 2));
	Table t = tEnv.fromDataStream(ds, $("a"), $("b")).select(call("pyFunc", $("a"), $("b")));
	// force generating the physical plan for the given table
	tEnv.toAppendStream(t, BasicTypeInfo.INT_TYPE_INFO);
	JobGraph jobGraph = env.getStreamGraph().getJobGraph();
	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	Assert.assertEquals(1, vertices.size());
}
 
Example 2
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testSlotSharingOnAllVerticesInSameSlotSharingGroupByDefaultDisabled() {
	final StreamGraph streamGraph = createStreamGraphForSlotSharingTest();
	streamGraph.setAllVerticesInSameSlotSharingGroupByDefault(false);
	final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

	final List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(4, verticesSorted.size());

	final List<JobVertex> verticesMatched = getExpectedVerticesList(verticesSorted);
	final JobVertex source1Vertex = verticesMatched.get(0);
	final JobVertex source2Vertex = verticesMatched.get(1);
	final JobVertex map1Vertex = verticesMatched.get(2);
	final JobVertex map2Vertex = verticesMatched.get(3);

	// vertices in the same region should be in the same slot sharing group
	assertSameSlotSharingGroup(source1Vertex, map1Vertex);

	// vertices in different regions should be in different slot sharing groups
	assertDistinctSharingGroups(source1Vertex, source2Vertex, map2Vertex);
}
 
Example 3
Source File: BackPressureITCase.java    From flink with Apache License 2.0
@Test
public void operatorsBecomeBackPressured() throws Exception {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment()
		.setParallelism(1);

	env.addSource(new InfiniteIntegerSource())
		.slotSharingGroup("sourceGroup")
		.map(new IdentityMapFunction<>())
		.slotSharingGroup("mapGroup")
		.addSink(new BlockingSink<>())
		.slotSharingGroup("sinkGroup");

	final JobGraph jobGraph = env.getStreamGraph().getJobGraph(TEST_JOB_ID);

	final List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	final JobVertex sourceJobVertex = vertices.get(0);
	final JobVertex mapJobVertex = vertices.get(1);

	testingMiniCluster.submitJob(jobGraph).get();

	assertJobVertexSubtasksAreBackPressured(mapJobVertex);
	assertJobVertexSubtasksAreBackPressured(sourceJobVertex);
}
 
Example 4
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void skipDeploymentIfVertexVersionOutdated() {
	testExecutionSlotAllocator.disableAutoCompletePendingRequests();

	final JobGraph jobGraph = nonParallelSourceSinkJobGraph();
	final List<JobVertex> sortedJobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	final ExecutionVertexID sourceExecutionVertexId = new ExecutionVertexID(sortedJobVertices.get(0).getID(), 0);
	final ExecutionVertexID sinkExecutionVertexId = new ExecutionVertexID(sortedJobVertices.get(1).getID(), 0);

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
	testExecutionSlotAllocator.completePendingRequest(sourceExecutionVertexId);

	final ArchivedExecutionVertex sourceExecutionVertex = scheduler.requestJob().getAllExecutionVertices().iterator().next();
	final ExecutionAttemptID attemptId = sourceExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobGraph.getJobID(), attemptId, ExecutionState.FAILED));
	testRestartBackoffTimeStrategy.setCanRestart(false);

	testExecutionSlotAllocator.enableAutoCompletePendingRequests();
	taskRestartExecutor.triggerScheduledTasks();

	assertThat(testExecutionVertexOperations.getDeployedVertices(), containsInAnyOrder(sourceExecutionVertexId, sinkExecutionVertexId));
	assertThat(scheduler.requestJob().getState(), is(equalTo(JobStatus.RUNNING)));
}
 
Example 5
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Test enabling the property "blockingConnectionsBetweenChains".
 */
@Test
public void testBlockingConnectionsBetweenChainsEnabled() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Filter -> Map -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	// partition transformation with an undefined shuffle mode between source and filter
	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
		sourceDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.UNDEFINED));
	DataStream<Integer> filterDataStream = partitionAfterSourceDataStream.filter(value -> true).setParallelism(2);

	DataStream<Integer> partitionAfterFilterDataStream = new DataStream<>(env, new PartitionTransformation<>(
		filterDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.UNDEFINED));
	DataStream<Integer> mapDataStream = partitionAfterFilterDataStream.map(value -> value).setParallelism(2);

	DataStream<Integer> partitionAfterMapDataStream = new DataStream<>(env, new PartitionTransformation<>(
		mapDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.PIPELINED));
	partitionAfterMapDataStream.print().setParallelism(1);

	StreamGraph streamGraph = env.getStreamGraph();
	streamGraph.setBlockingConnectionsBetweenChains(true);
	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(3, verticesSorted.size());

	JobVertex sourceVertex = verticesSorted.get(0);
	// still can be chained
	JobVertex filterAndMapVertex = verticesSorted.get(1);
	JobVertex printVertex = verticesSorted.get(2);

	// the edge with undefined shuffle mode is translated into BLOCKING
	assertEquals(ResultPartitionType.BLOCKING, sourceVertex.getProducedDataSets().get(0).getResultType());
	// the edge with PIPELINED shuffle mode is translated into PIPELINED_BOUNDED
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, filterAndMapVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, printVertex.getInputs().get(0).getSource().getResultType());
}
 
Example 6
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testUnalignedCheckAndAtLeastOnce() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.fromElements(0).print();
	StreamGraph streamGraph = env.getStreamGraph();
	env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
	env.getCheckpointConfig().enableUnalignedCheckpoints(true);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	StreamConfig streamConfig = new StreamConfig(verticesSorted.get(0).getConfiguration());
	assertEquals(CheckpointingMode.AT_LEAST_ONCE, streamConfig.getCheckpointMode());
	assertFalse(streamConfig.isUnalignedCheckpointsEnabled());
}
 
Example 7
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Verify that "blockingConnectionsBetweenChains" is off by default.
 */
@Test
public void testBlockingAfterChainingOffDisabled() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Filter -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	// partition transformation with an undefined shuffle mode between source and filter
	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
		sourceDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.UNDEFINED));
	DataStream<Integer> filterDataStream = partitionAfterSourceDataStream.filter(value -> true).setParallelism(2);

	DataStream<Integer> partitionAfterFilterDataStream = new DataStream<>(env, new PartitionTransformation<>(
		filterDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.UNDEFINED));

	partitionAfterFilterDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(2, verticesSorted.size());

	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex filterAndPrintVertex = verticesSorted.get(1);

	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED,
			filterAndPrintVertex.getInputs().get(0).getSource().getResultType());
}
 
Example 8
Source File: StreamingJobGraphGeneratorWithGlobalDataExchangeModeTest.java    From flink with Apache License 2.0
@Test
public void testPointwiseEdgesPipelinedMode() {
	final StreamGraph streamGraph = createStreamGraph();
	streamGraph.setGlobalDataExchangeMode(GlobalDataExchangeMode.POINTWISE_EDGES_PIPELINED);
	final JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

	final List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	final JobVertex sourceVertex = verticesSorted.get(0);
	final JobVertex map1Vertex = verticesSorted.get(1);
	final JobVertex map2Vertex = verticesSorted.get(2);

	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, map1Vertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.BLOCKING, map2Vertex.getProducedDataSets().get(0).getResultType());
}
 
Example 9
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Test setting shuffle mode to {@link ShuffleMode#BATCH}.
 */
@Test
public void testShuffleModeBatch() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Map -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
			sourceDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.BATCH));
	DataStream<Integer> mapDataStream = partitionAfterSourceDataStream.map(value -> value).setParallelism(1);

	DataStream<Integer> partitionAfterMapDataStream = new DataStream<>(env, new PartitionTransformation<>(
			mapDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.BATCH));
	partitionAfterMapDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(3, verticesSorted.size());

	// it can not be chained with BATCH shuffle mode
	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex mapVertex = verticesSorted.get(1);

	// BATCH shuffle mode is translated into BLOCKING result partition
	assertEquals(ResultPartitionType.BLOCKING,
		sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.BLOCKING,
		mapVertex.getProducedDataSets().get(0).getResultType());
}
 
Example 10
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testEnabledUnalignedCheckAndDisabledCheckpointing() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.fromElements(0).print();
	StreamGraph streamGraph = env.getStreamGraph();
	assertFalse("Checkpointing enabled", streamGraph.getCheckpointConfig().isCheckpointingEnabled());
	env.getCheckpointConfig().enableUnalignedCheckpoints(true);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	StreamConfig streamConfig = new StreamConfig(verticesSorted.get(0).getConfiguration());
	assertEquals(CheckpointingMode.AT_LEAST_ONCE, streamConfig.getCheckpointMode());
	assertFalse(streamConfig.isUnalignedCheckpointsEnabled());
}
 
Example 11
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Test setting shuffle mode to {@link ShuffleMode#PIPELINED}.
 */
@Test
public void testShuffleModePipelined() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Map -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
			sourceDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.PIPELINED));
	DataStream<Integer> mapDataStream = partitionAfterSourceDataStream.map(value -> value).setParallelism(1);

	DataStream<Integer> partitionAfterMapDataStream = new DataStream<>(env, new PartitionTransformation<>(
			mapDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.PIPELINED));
	partitionAfterMapDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(2, verticesSorted.size());

	// it can be chained with PIPELINED shuffle mode
	JobVertex sourceAndMapVertex = verticesSorted.get(0);

	// PIPELINED shuffle mode is translated into PIPELINED_BOUNDED result partition
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED,
			sourceAndMapVertex.getProducedDataSets().get(0).getResultType());
}
 
Example 12
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Verifies that the chain start/end is correctly set.
 */
@Test
public void testChainStartEndSetting() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	// set parallelism to 2 to avoid chaining with source in case when available processors is 1.
	env.setParallelism(2);

	// fromElements -> CHAIN(Map -> Print)
	env.fromElements(1, 2, 3)
		.map(new MapFunction<Integer, Integer>() {
			@Override
			public Integer map(Integer value) throws Exception {
				return value;
			}
		})
		.print();
	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex mapPrintVertex = verticesSorted.get(1);

	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, mapPrintVertex.getInputs().get(0).getSource().getResultType());

	StreamConfig sourceConfig = new StreamConfig(sourceVertex.getConfiguration());
	StreamConfig mapConfig = new StreamConfig(mapPrintVertex.getConfiguration());
	Map<Integer, StreamConfig> chainedConfigs = mapConfig.getTransitiveChainedTaskConfigs(getClass().getClassLoader());
	StreamConfig printConfig = chainedConfigs.values().iterator().next();

	assertTrue(sourceConfig.isChainStart());
	assertTrue(sourceConfig.isChainEnd());

	assertTrue(mapConfig.isChainStart());
	assertFalse(mapConfig.isChainEnd());

	assertFalse(printConfig.isChainStart());
	assertTrue(printConfig.isChainEnd());
}
 
Example 13
Source File: SlotAllocationTest.java    From flink with Apache License 2.0
@Test
public void testTwoPipelines() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	FilterFunction<Long> dummyFilter = new FilterFunction<Long>() {
		@Override
		public boolean filter(Long value) {
			return false;
		}
	};

	env.generateSequence(1, 10)
		.filter(dummyFilter).slotSharingGroup("isolated")
		.filter(dummyFilter).slotSharingGroup("default").disableChaining()
		.filter(dummyFilter).slotSharingGroup("group 1")
		.filter(dummyFilter).startNewChain()
		.print().disableChaining();

	// verify that a second pipeline does not inherit the groups from the first pipeline
	env.generateSequence(1, 10)
			.filter(dummyFilter).slotSharingGroup("isolated-2")
			.filter(dummyFilter).slotSharingGroup("default").disableChaining()
			.filter(dummyFilter).slotSharingGroup("group 2")
			.filter(dummyFilter).startNewChain()
			.print().disableChaining();

	JobGraph jobGraph = env.getStreamGraph().getJobGraph();

	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();

	assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(3).getSlotSharingGroup());
	assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
	assertNotEquals(vertices.get(3).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
	assertEquals(vertices.get(4).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
	assertEquals(vertices.get(5).getSlotSharingGroup(), vertices.get(6).getSlotSharingGroup());

	int pipelineStart = 6;
	assertEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(pipelineStart + 2).getSlotSharingGroup());
	assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(pipelineStart + 1).getSlotSharingGroup());
	assertNotEquals(vertices.get(pipelineStart + 2).getSlotSharingGroup(), vertices.get(pipelineStart + 3).getSlotSharingGroup());
	assertEquals(vertices.get(pipelineStart + 3).getSlotSharingGroup(), vertices.get(pipelineStart + 4).getSlotSharingGroup());
	assertEquals(vertices.get(pipelineStart + 4).getSlotSharingGroup(), vertices.get(pipelineStart + 5).getSlotSharingGroup());

}
 
Example 14
Source File: JobMasterTest.java    From flink with Apache License 2.0
@Test
public void testDuplicatedKvStateRegistrationsFailTask() throws Exception {
	final JobGraph graph = createKvJobGraph();
	final List<JobVertex> jobVertices = graph.getVerticesSortedTopologicallyFromSources();
	final JobVertex vertex1 = jobVertices.get(0);
	final JobVertex vertex2 = jobVertices.get(1);

	final JobMaster jobMaster = createJobMaster(
		configuration,
		graph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build(),
		heartbeatServices);

	CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);
	final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

	try {
		// wait for the start to complete
		startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

		// duplicate registration fails task

		// register a KvState
		final String registrationName = "duplicate-me";
		final KvStateID kvStateID = new KvStateID();
		final KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
		final InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 4396);

		jobMasterGateway.notifyKvStateRegistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName,
			kvStateID,
			address).get();

		try {
			jobMasterGateway.notifyKvStateRegistered(
				graph.getJobID(),
				vertex2.getID(), // <--- different operator, but...
				keyGroupRange,
				registrationName,  // ...same name
				kvStateID,
				address).get();
			fail("Expected to fail because of clashing registration message.");
		} catch (Exception e) {
			assertTrue(ExceptionUtils.findThrowableWithMessage(e, "Registration name clash").isPresent());
			assertEquals(JobStatus.FAILED, jobMasterGateway.requestJobStatus(testingTimeout).get());
		}
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}
 
Example 15
Source File: JobMasterTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRegisterAndUnregisterKvState() throws Exception {
	final JobGraph graph = createKvJobGraph();
	final List<JobVertex> jobVertices = graph.getVerticesSortedTopologicallyFromSources();
	final JobVertex vertex1 = jobVertices.get(0);

	final JobMaster jobMaster = createJobMaster(
		configuration,
		graph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build(),
		heartbeatServices);

	CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);
	final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

	try {
		// wait for the start to complete
		startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

		// register a KvState
		final String registrationName = "register-me";
		final KvStateID kvStateID = new KvStateID();
		final KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
		final InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 1029);

		jobMasterGateway.notifyKvStateRegistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName,
			kvStateID,
			address).get();

		final KvStateLocation location = jobMasterGateway.requestKvStateLocation(graph.getJobID(), registrationName).get();

		assertEquals(graph.getJobID(), location.getJobId());
		assertEquals(vertex1.getID(), location.getJobVertexId());
		assertEquals(vertex1.getMaxParallelism(), location.getNumKeyGroups());
		assertEquals(1, location.getNumRegisteredKeyGroups());
		assertEquals(1, keyGroupRange.getNumberOfKeyGroups());
		assertEquals(kvStateID, location.getKvStateID(keyGroupRange.getStartKeyGroup()));
		assertEquals(address, location.getKvStateServerAddress(keyGroupRange.getStartKeyGroup()));

		// unregister the KvState
		jobMasterGateway.notifyKvStateUnregistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName).get();

		try {
			jobMasterGateway.requestKvStateLocation(graph.getJobID(), registrationName).get();
			fail("Expected to fail with an UnknownKvStateLocation.");
		} catch (Exception e) {
			assertTrue(ExceptionUtils.findThrowable(e, UnknownKvStateLocation.class).isPresent());
		}
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}
 
Example 16
Source File: JobMasterTest.java    From flink with Apache License 2.0
@Test
public void testRegisterAndUnregisterKvState() throws Exception {
	final JobGraph graph = createKvJobGraph();
	final List<JobVertex> jobVertices = graph.getVerticesSortedTopologicallyFromSources();
	final JobVertex vertex1 = jobVertices.get(0);

	final JobMaster jobMaster = createJobMaster(
		configuration,
		graph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build(),
		heartbeatServices);

	CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);
	final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

	try {
		// wait for the start to complete
		startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

		// register a KvState
		final String registrationName = "register-me";
		final KvStateID kvStateID = new KvStateID();
		final KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
		final InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 1029);

		jobMasterGateway.notifyKvStateRegistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName,
			kvStateID,
			address).get();

		final KvStateLocation location = jobMasterGateway.requestKvStateLocation(graph.getJobID(), registrationName).get();

		assertEquals(graph.getJobID(), location.getJobId());
		assertEquals(vertex1.getID(), location.getJobVertexId());
		assertEquals(vertex1.getMaxParallelism(), location.getNumKeyGroups());
		assertEquals(1, location.getNumRegisteredKeyGroups());
		assertEquals(1, keyGroupRange.getNumberOfKeyGroups());
		assertEquals(kvStateID, location.getKvStateID(keyGroupRange.getStartKeyGroup()));
		assertEquals(address, location.getKvStateServerAddress(keyGroupRange.getStartKeyGroup()));

		// unregister the KvState
		jobMasterGateway.notifyKvStateUnregistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName).get();

		try {
			jobMasterGateway.requestKvStateLocation(graph.getJobID(), registrationName).get();
			fail("Expected to fail with an UnknownKvStateLocation.");
		} catch (Exception e) {
			assertTrue(ExceptionUtils.findThrowable(e, UnknownKvStateLocation.class).isPresent());
		}
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}
 
Example 17
Source File: SlotAllocationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testTwoPipelines() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	FilterFunction<Long> dummyFilter = new FilterFunction<Long>() {
		@Override
		public boolean filter(Long value) {
			return false;
		}
	};

	env.generateSequence(1, 10)
		.filter(dummyFilter).slotSharingGroup("isolated")
		.filter(dummyFilter).slotSharingGroup("default").disableChaining()
		.filter(dummyFilter).slotSharingGroup("group 1")
		.filter(dummyFilter).startNewChain()
		.print().disableChaining();

	// verify that a second pipeline does not inherit the groups from the first pipeline
	env.generateSequence(1, 10)
			.filter(dummyFilter).slotSharingGroup("isolated-2")
			.filter(dummyFilter).slotSharingGroup("default").disableChaining()
			.filter(dummyFilter).slotSharingGroup("group 2")
			.filter(dummyFilter).startNewChain()
			.print().disableChaining();

	JobGraph jobGraph = env.getStreamGraph().getJobGraph();

	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();

	assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(3).getSlotSharingGroup());
	assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
	assertNotEquals(vertices.get(3).getSlotSharingGroup(), vertices.get(4).getSlotSharingGroup());
	assertEquals(vertices.get(4).getSlotSharingGroup(), vertices.get(5).getSlotSharingGroup());
	assertEquals(vertices.get(5).getSlotSharingGroup(), vertices.get(6).getSlotSharingGroup());

	int pipelineStart = 6;
	assertEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(pipelineStart + 2).getSlotSharingGroup());
	assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(pipelineStart + 1).getSlotSharingGroup());
	assertNotEquals(vertices.get(pipelineStart + 2).getSlotSharingGroup(), vertices.get(pipelineStart + 3).getSlotSharingGroup());
	assertEquals(vertices.get(pipelineStart + 3).getSlotSharingGroup(), vertices.get(pipelineStart + 4).getSlotSharingGroup());
	assertEquals(vertices.get(pipelineStart + 4).getSlotSharingGroup(), vertices.get(pipelineStart + 5).getSlotSharingGroup());

}
 
Example 18
Source File: JobMasterTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testDuplicatedKvStateRegistrationsFailTask() throws Exception {
	final JobGraph graph = createKvJobGraph();
	final List<JobVertex> jobVertices = graph.getVerticesSortedTopologicallyFromSources();
	final JobVertex vertex1 = jobVertices.get(0);
	final JobVertex vertex2 = jobVertices.get(1);

	final JobMaster jobMaster = createJobMaster(
		configuration,
		graph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build(),
		heartbeatServices);

	CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);
	final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

	try {
		// wait for the start to complete
		startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

		// duplicate registration fails task

		// register a KvState
		final String registrationName = "duplicate-me";
		final KvStateID kvStateID = new KvStateID();
		final KeyGroupRange keyGroupRange = new KeyGroupRange(0, 0);
		final InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 4396);

		jobMasterGateway.notifyKvStateRegistered(
			graph.getJobID(),
			vertex1.getID(),
			keyGroupRange,
			registrationName,
			kvStateID,
			address).get();

		try {
			jobMasterGateway.notifyKvStateRegistered(
				graph.getJobID(),
				vertex2.getID(), // <--- different operator, but...
				keyGroupRange,
				registrationName,  // ...same name
				kvStateID,
				address).get();
			fail("Expected to fail because of clashing registration message.");
		} catch (Exception e) {
			assertTrue(ExceptionUtils.findThrowableWithMessage(e, "Registration name clash").isPresent());
			assertEquals(JobStatus.FAILED, jobMaster.getExecutionGraph().getState());
		}
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}