org.apache.flink.runtime.executiongraph.ExecutionGraph Java Examples

The following examples show how to use org.apache.flink.runtime.executiongraph.ExecutionGraph. They are taken from open source projects; the originating source file and project are noted above each example.
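Before the individual examples, here is a minimal sketch of how an ExecutionGraph is typically obtained in the tests on this page: a JobVertex is created and turned into an ExecutionGraph via the test utilities, after which the generated execution vertices can be inspected. This is only an illustrative fragment assembled from calls that appear in the examples below (ExecutionGraphTestUtils.createNoOpVertex, ExecutionGraphTestUtils.createSimpleTestGraph, getAllExecutionVertices); it is not a complete test class.

// Illustrative sketch only, assembled from calls used in the examples below.
ExecutionGraph buildSmallTestGraph() throws Exception {
	// a no-op source vertex with parallelism 2
	JobVertex source = ExecutionGraphTestUtils.createNoOpVertex(2);

	// build a simple ExecutionGraph around it using the test utilities
	ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(source);

	// one ExecutionVertex exists per parallel subtask
	for (ExecutionVertex vertex : eg.getAllExecutionVertices()) {
		System.out.println(vertex.getTaskNameWithSubtaskIndex());
	}
	return eg;
}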
Example #1
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * Tests that a graph consisting of single, unconnected vertices works correctly.
 * 
 * <pre>
 *     (v1)
 *     
 *     (v2)
 *     
 *     (v3)
 *     
 *     ...
 * </pre>
 */
@Test
public void testIndividualVertices() throws Exception {
	final JobVertex source1 = new JobVertex("source1");
	source1.setInvokableClass(NoOpInvokable.class);
	source1.setParallelism(2);

	final JobVertex source2 = new JobVertex("source2");
	source2.setInvokableClass(NoOpInvokable.class);
	source2.setParallelism(2);

	final JobGraph jobGraph = new JobGraph("test job", source1, source2);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion sourceRegion11 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source1.getID()).getTaskVertices()[0]);
	FailoverRegion sourceRegion12 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source1.getID()).getTaskVertices()[1]);
	FailoverRegion targetRegion21 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source2.getID()).getTaskVertices()[0]);
	FailoverRegion targetRegion22 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source2.getID()).getTaskVertices()[1]);

	assertTrue(sourceRegion11 != sourceRegion12);
	assertTrue(sourceRegion12 != targetRegion21);
	assertTrue(targetRegion21 != targetRegion22);
}
 
Example #2
Source File: ExecutionGraphCheckpointCoordinatorTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is failed.
 */
@Test
public void testShutdownCheckpointCoordinatorOnFailure() throws Exception {
	final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
	CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

	final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
	CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

	ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);
	final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

	assertThat(checkpointCoordinator, Matchers.notNullValue());
	assertThat(checkpointCoordinator.isShutdown(), is(false));

	graph.failGlobal(new Exception("Test Exception"));

	assertThat(checkpointCoordinator.isShutdown(), is(true));
	assertThat(counterShutdownFuture.get(), is(JobStatus.FAILED));
	assertThat(storeShutdownFuture.get(), is(JobStatus.FAILED));
}
 
Example #3
Source File: ExecutionGraphCheckpointCoordinatorTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is suspended.
 */
@Test
public void testShutdownCheckpointCoordinatorOnSuspend() throws Exception {
	final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
	CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

	final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
	CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

	ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);
	final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

	assertThat(checkpointCoordinator, Matchers.notNullValue());
	assertThat(checkpointCoordinator.isShutdown(), is(false));

	graph.suspend(new Exception("Test Exception"));

	assertThat(checkpointCoordinator.isShutdown(), is(true));
	assertThat(counterShutdownFuture.get(), is(JobStatus.SUSPENDED));
	assertThat(storeShutdownFuture.get(), is(JobStatus.SUSPENDED));
}
 
Example #4
Source File: ExecutionGraphCheckpointCoordinatorTest.java    From flink with Apache License 2.0
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is suspended.
 */
@Test
public void testShutdownCheckpointCoordinatorOnSuspend() throws Exception {
	final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
	CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

	final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
	CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

	ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);
	final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

	assertThat(checkpointCoordinator, Matchers.notNullValue());
	assertThat(checkpointCoordinator.isShutdown(), is(false));

	graph.suspend(new Exception("Test Exception"));

	assertThat(checkpointCoordinator.isShutdown(), is(true));
	assertThat(counterShutdownFuture.get(), is(JobStatus.SUSPENDED));
	assertThat(storeShutdownFuture.get(), is(JobStatus.SUSPENDED));
}
 
Example #5
Source File: DefaultFailoverTopology.java    From flink with Apache License 2.0
public DefaultFailoverTopology(ExecutionGraph executionGraph) {
	checkNotNull(executionGraph);

	this.containsCoLocationConstraints = executionGraph.getAllVertices().values().stream()
		.map(ExecutionJobVertex::getCoLocationGroup)
		.anyMatch(Objects::nonNull);

	// generate vertices
	this.failoverVertices = new ArrayList<>();
	final Map<ExecutionVertex, DefaultFailoverVertex> failoverVertexMap = new IdentityHashMap<>();
	for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
		final DefaultFailoverVertex failoverVertex = new DefaultFailoverVertex(
			new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex()),
			vertex.getTaskNameWithSubtaskIndex());
		this.failoverVertices.add(failoverVertex);
		failoverVertexMap.put(vertex, failoverVertex);
	}

	// generate edges
	connectVerticesWithEdges(failoverVertexMap);
}
 
Example #6
Source File: LegacyScheduler.java    From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(
		JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
		ShuffleMaster<?> shuffleMaster,
		final PartitionTracker partitionTracker) throws JobExecutionException, JobException {
	return ExecutionGraphBuilder.buildGraph(
		null,
		jobGraph,
		jobMasterConfiguration,
		futureExecutor,
		ioExecutor,
		slotProvider,
		userCodeLoader,
		checkpointRecoveryFactory,
		rpcTimeout,
		restartStrategy,
		currentJobManagerJobMetricGroup,
		blobWriter,
		slotRequestTimeout,
		log,
		shuffleMaster,
		partitionTracker);
}
 
Example #7
Source File: LegacyScheduler.java    From flink with Apache License 2.0
private ExecutionGraph createAndRestoreExecutionGraph(
		JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
		ShuffleMaster<?> shuffleMaster,
		PartitionTracker partitionTracker) throws Exception {

	ExecutionGraph newExecutionGraph = createExecutionGraph(currentJobManagerJobMetricGroup, shuffleMaster, partitionTracker);

	final CheckpointCoordinator checkpointCoordinator = newExecutionGraph.getCheckpointCoordinator();

	if (checkpointCoordinator != null) {
		// check whether we find a valid checkpoint
		if (!checkpointCoordinator.restoreLatestCheckpointedState(
			newExecutionGraph.getAllVertices(),
			false,
			false)) {

			// check whether we can restore from a savepoint
			tryRestoreExecutionGraphFromSavepoint(newExecutionGraph, jobGraph.getSavepointRestoreSettings());
		}
	}

	return newExecutionGraph;
}
 
Example #8
Source File: DefaultExecutionTopologyTest.java    From flink with Apache License 2.0
private ExecutionGraph createExecutionGraphWithCoLocationConstraint() throws Exception {
	JobVertex[] jobVertices = new JobVertex[2];
	int parallelism = 3;
	jobVertices[0] = createNoOpVertex("v1", parallelism);
	jobVertices[1] = createNoOpVertex("v2", parallelism);
	jobVertices[1].connectNewDataSetAsInput(jobVertices[0], ALL_TO_ALL, PIPELINED);

	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
	jobVertices[0].setSlotSharingGroup(slotSharingGroup);
	jobVertices[1].setSlotSharingGroup(slotSharingGroup);

	CoLocationGroup coLocationGroup = new CoLocationGroup();
	coLocationGroup.addVertex(jobVertices[0]);
	coLocationGroup.addVertex(jobVertices[1]);
	jobVertices[0].updateCoLocationGroup(coLocationGroup);
	jobVertices[1].updateCoLocationGroup(coLocationGroup);

	return createSimpleTestGraph(
		taskManagerGateway,
		triggeredRestartStrategy,
		jobVertices);
}
 
Example #9
Source File: ExecutionGraphToInputsLocationsRetrieverAdapterTest.java    From flink with Apache License 2.0
/**
 * Tests that it can get the task manager location in an Execution.
 */
@Test
public void testGetTaskManagerLocationWhenScheduled() throws Exception {
	final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);

	final TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlotBuilder().createTestingLogicalSlot();
	final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(jobVertex);
	final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
			new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);

	final ExecutionVertex onlyExecutionVertex = eg.getAllExecutionVertices().iterator().next();
	onlyExecutionVertex.deployToSlot(testingLogicalSlot);

	ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
	Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocationOptional =
			inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);

	assertTrue(taskManagerLocationOptional.isPresent());

	final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture = taskManagerLocationOptional.get();
	assertThat(taskManagerLocationFuture.get(), is(testingLogicalSlot.getTaskManagerLocation()));
}
 
Example #10
Source File: ExecutionGraphCheckpointCoordinatorTest.java    From flink with Apache License 2.0
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is failed.
 */
@Test
public void testShutdownCheckpointCoordinatorOnFailure() throws Exception {
	final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
	CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

	final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
	CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

	ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);
	final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

	assertThat(checkpointCoordinator, Matchers.notNullValue());
	assertThat(checkpointCoordinator.isShutdown(), is(false));

	graph.failGlobal(new Exception("Test Exception"));

	assertThat(checkpointCoordinator.isShutdown(), is(true));
	assertThat(counterShutdownFuture.get(), is(JobStatus.FAILED));
	assertThat(storeShutdownFuture.get(), is(JobStatus.FAILED));
}
 
Example #11
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
	// configure the pipelined failover strategy
	final Configuration jobManagerConfig = new Configuration();
	jobManagerConfig.setString(
			JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
			FailoverStrategyLoader.LEGACY_PIPELINED_REGION_RESTART_STRATEGY_NAME);

	final Time timeout = Time.seconds(10L);
	return ExecutionGraphBuilder.buildGraph(
		null,
		jobGraph,
		jobManagerConfig,
		TestingUtils.defaultExecutor(),
		TestingUtils.defaultExecutor(),
		mock(SlotProvider.class),
		PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
		new StandaloneCheckpointRecoveryFactory(),
		timeout,
		new NoRestartStrategy(),
		new UnregisteredMetricsGroup(),
		VoidBlobWriter.getInstance(),
		timeout,
		log,
		NettyShuffleMaster.INSTANCE,
		NoOpPartitionTracker.INSTANCE);
}
 
Example #12
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
	// configure the pipelined failover strategy
	final Configuration jobManagerConfig = new Configuration();
	jobManagerConfig.setString(
			JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
			FailoverStrategyLoader.PIPELINED_REGION_RESTART_STRATEGY_NAME);

	final Time timeout = Time.seconds(10L);
	return ExecutionGraphBuilder.buildGraph(
		null,
		jobGraph,
		jobManagerConfig,
		TestingUtils.defaultExecutor(),
		TestingUtils.defaultExecutor(),
		mock(SlotProvider.class),
		PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
		new StandaloneCheckpointRecoveryFactory(),
		timeout,
		new NoRestartStrategy(),
		new UnregisteredMetricsGroup(),
		1000,
		VoidBlobWriter.getInstance(),
		timeout,
		log);
}
 
Example #13
Source File: JobMaster.java    From Flink-CEPplus with Apache License 2.0
private ExecutionGraph createExecutionGraph(JobManagerJobMetricGroup currentJobManagerJobMetricGroup) throws JobExecutionException, JobException {
	return ExecutionGraphBuilder.buildGraph(
		null,
		jobGraph,
		jobMasterConfiguration.getConfiguration(),
		scheduledExecutorService,
		scheduledExecutorService,
		scheduler,
		userCodeLoader,
		highAvailabilityServices.getCheckpointRecoveryFactory(),
		rpcTimeout,
		restartStrategy,
		currentJobManagerJobMetricGroup,
		blobWriter,
		jobMasterConfiguration.getSlotRequestTimeout(),
		log);
}
 
Example #14
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 *
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(2);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
	FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
	FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

	assertTrue(region1 == region2);
	assertTrue(region2 != region31);
	assertTrue(region32 != region31);
}
 
Example #15
Source File: SchedulerBase.java    From flink with Apache License 2.0
private ExecutionGraph createExecutionGraph(
	JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
	ShuffleMaster<?> shuffleMaster,
	final JobMasterPartitionTracker partitionTracker) throws JobExecutionException, JobException {

	final FailoverStrategy.Factory failoverStrategy = legacyScheduling ?
		FailoverStrategyLoader.loadFailoverStrategy(jobMasterConfiguration, log) :
		new NoOpFailoverStrategy.Factory();

	return ExecutionGraphBuilder.buildGraph(
		null,
		jobGraph,
		jobMasterConfiguration,
		futureExecutor,
		ioExecutor,
		slotProvider,
		userCodeLoader,
		checkpointRecoveryFactory,
		rpcTimeout,
		restartStrategy,
		currentJobManagerJobMetricGroup,
		blobWriter,
		slotRequestTimeout,
		log,
		shuffleMaster,
		partitionTracker,
		failoverStrategy);
}
 
Example #16
Source File: TaskDeploymentDescriptorFactory.java    From flink with Apache License 2.0
public static TaskDeploymentDescriptorFactory fromExecutionVertex(
		ExecutionVertex executionVertex,
		int attemptNumber) throws IOException {
	ExecutionGraph executionGraph = executionVertex.getExecutionGraph();
	return new TaskDeploymentDescriptorFactory(
		executionVertex.getCurrentExecutionAttempt().getAttemptId(),
		attemptNumber,
		getSerializedJobInformation(executionGraph),
		getSerializedTaskInformation(executionVertex.getJobVertex().getTaskInformationOrBlobKey()),
		executionGraph.getJobID(),
		executionGraph.getScheduleMode().allowLazyDeployment(),
		executionVertex.getParallelSubtaskIndex(),
		executionVertex.getAllInputEdges());
}
 
Example #17
Source File: ExecutionGraphCheckpointCoordinatorTest.java    From flink with Apache License 2.0
private ExecutionGraph createExecutionGraphAndEnableCheckpointing(
		CheckpointIDCounter counter,
		CompletedCheckpointStore store) throws Exception {
	final Time timeout = Time.days(1L);

	JobVertex jobVertex = new JobVertex("MockVertex");
	jobVertex.setInvokableClass(AbstractInvokable.class);

	final ExecutionGraph executionGraph = new ExecutionGraphTestUtils.TestingExecutionGraphBuilder(jobVertex)
		.setRpcTimeout(timeout)
		.setAllocationTimeout(timeout)
		.allowQueuedScheduling()
		.build();

	executionGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

	CheckpointCoordinatorConfiguration chkConfig = new CheckpointCoordinatorConfiguration(
		100,
		100,
		100,
		1,
		CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
		true,
		false,
		0);

	executionGraph.enableCheckpointing(
			chkConfig,
			Collections.emptyList(),
			Collections.emptyList(),
			Collections.emptyList(),
			Collections.emptyList(),
			counter,
			store,
			new MemoryStateBackend(),
			CheckpointStatsTrackerTest.createTestTracker());

	return executionGraph;
}
 
Example #18
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * Tests that a single pipelined component formed by a sequence of all-to-all
 * connections works correctly.
 * 
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *     ...
 * </pre>
 */
@Test
public void testOneComponentViaTwoExchanges() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(5);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[4]);
	FailoverRegion region3 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);

	assertTrue(region1 == region2);
	assertTrue(region2 == region3);
}
 
Example #19
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
@Test
public void testDiamondWithMixedPipelinedAndBlockingExchanges() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(8);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(8);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(8);

	final JobVertex vertex4 = new JobVertex("vertex4");
	vertex4.setInvokableClass(NoOpInvokable.class);
	vertex4.setParallelism(8);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	vertex4.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3, vertex4);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

	Iterator<ExecutionVertex> evs = eg.getAllExecutionVertices().iterator();

	FailoverRegion preRegion = failoverStrategy.getFailoverRegion(evs.next());

	while (evs.hasNext()) {
		FailoverRegion region = failoverStrategy.getFailoverRegion(evs.next());
		assertTrue(preRegion == region);
	}
}
 
Example #20
Source File: DefaultFailoverTopologyTest.java    From flink with Apache License 2.0
/**
 * Tests the case that the graph has collocation constraints.
 */
@Test
public void testWithCollocationConstraints() throws Exception {
	ExecutionGraph executionGraph = createExecutionGraph(true);
	DefaultFailoverTopology adapter = new DefaultFailoverTopology(executionGraph);
	assertTrue(adapter.containsCoLocationConstraints());
}
 
Example #21
Source File: FailoverRegion.java    From flink with Apache License 2.0
public FailoverRegion(
	ExecutionGraph executionGraph,
	List<ExecutionVertex> connectedExecutions,
	Map<JobVertexID, ExecutionJobVertex> tasks) {

	this.executionGraph = checkNotNull(executionGraph);
	this.connectedExecutionVertexes = checkNotNull(connectedExecutions);
	this.tasks = checkNotNull(tasks);

	LOG.debug("Created failover region {} with vertices: {}", id, connectedExecutions);
}
 
Example #22
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testDiamondWithMixedPipelinedAndBlockingExchanges() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(8);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(8);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(8);

	final JobVertex vertex4 = new JobVertex("vertex4");
	vertex4.setInvokableClass(NoOpInvokable.class);
	vertex4.setParallelism(8);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	vertex4.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3, vertex4);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

	Iterator<ExecutionVertex> evs = eg.getAllExecutionVertices().iterator();

	FailoverRegion preRegion = failoverStrategy.getFailoverRegion(evs.next());

	while (evs.hasNext()) {
		FailoverRegion region = failoverStrategy.getFailoverRegion(evs.next());
		assertTrue(preRegion == region);
	}
}
 
Example #23
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange2() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(2);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
	FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
	FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

	assertTrue(region1 == region2);
	assertTrue(region2 != region31);
	assertTrue(region32 != region31);
}
 
Example #24
Source File: DefaultExecutionTopologyTest.java    From flink with Apache License 2.0
private static void assertGraphEquals(
	ExecutionGraph originalGraph,
	DefaultExecutionTopology adaptedTopology) {

	Iterator<ExecutionVertex> originalVertices = originalGraph.getAllExecutionVertices().iterator();
	Iterator<DefaultExecutionVertex> adaptedVertices = adaptedTopology.getVertices().iterator();

	while (originalVertices.hasNext()) {
		ExecutionVertex originalVertex = originalVertices.next();
		DefaultExecutionVertex adaptedVertex = adaptedVertices.next();

		assertVertexEquals(originalVertex, adaptedVertex);

		List<IntermediateResultPartition> originalConsumedPartitions = IntStream.range(0, originalVertex.getNumberOfInputs())
			.mapToObj(originalVertex::getInputEdges)
			.flatMap(Arrays::stream)
			.map(ExecutionEdge::getSource)
			.collect(Collectors.toList());
		Iterable<DefaultResultPartition> adaptedConsumedPartitions = adaptedVertex.getConsumedResults();

		assertPartitionsEquals(originalConsumedPartitions, adaptedConsumedPartitions);

		Collection<IntermediateResultPartition> originalProducedPartitions = originalVertex.getProducedPartitions().values();
		Iterable<DefaultResultPartition> adaptedProducedPartitions = adaptedVertex.getProducedResults();

		assertPartitionsEquals(originalProducedPartitions, adaptedProducedPartitions);
	}

	assertFalse("Number of adapted vertices exceeds number of original vertices.", adaptedVertices.hasNext());
}
 
Example #25
Source File: DefaultExecutionTopology.java    From flink with Apache License 2.0 5 votes vote down vote up
public DefaultExecutionTopology(ExecutionGraph graph) {
	checkNotNull(graph, "execution graph can not be null");

	this.containsCoLocationConstraints = graph.getAllVertices().values().stream()
		.map(ExecutionJobVertex::getCoLocationGroup)
		.anyMatch(Objects::nonNull);

	this.executionVerticesById = new HashMap<>();
	this.executionVerticesList = new ArrayList<>(graph.getTotalNumberOfVertices());
	Map<IntermediateResultPartitionID, DefaultResultPartition> tmpResultPartitionsById = new HashMap<>();
	Map<ExecutionVertex, DefaultExecutionVertex> executionVertexMap = new HashMap<>();

	for (ExecutionVertex vertex : graph.getAllExecutionVertices()) {
		List<DefaultResultPartition> producedPartitions = generateProducedSchedulingResultPartition(vertex.getProducedPartitions());

		producedPartitions.forEach(partition -> tmpResultPartitionsById.put(partition.getId(), partition));

		DefaultExecutionVertex schedulingVertex = generateSchedulingExecutionVertex(vertex, producedPartitions);
		this.executionVerticesById.put(schedulingVertex.getId(), schedulingVertex);
		this.executionVerticesList.add(schedulingVertex);
		executionVertexMap.put(vertex, schedulingVertex);
	}
	this.resultPartitionsById = tmpResultPartitionsById;

	connectVerticesToConsumedPartitions(executionVertexMap, tmpResultPartitionsById);

	this.pipelinedRegionsByVertex = new HashMap<>();
	this.pipelinedRegions = new ArrayList<>();
	initializePipelinedRegions();
}
 
Example #26
Source File: DefaultSchedulingPipelinedRegionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that the consumed inputs of the pipelined regions are computed
 * correctly using the job graph below.
 * <pre>
 *          c
 *        /  X
 * a -+- b   e
 *       \  /
 *        d
 * </pre>
 * Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
	final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

	b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final ExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
	final DefaultExecutionTopology topology = new DefaultExecutionTopology(simpleTestGraph);

	final DefaultSchedulingPipelinedRegion firstPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
	final DefaultSchedulingPipelinedRegion secondPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

	final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
	final IntermediateResultPartitionID b0ConsumedResultPartition = Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

	final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = IterableUtils.toStream(secondPipelinedRegion.getConsumedResults())
		.map(DefaultResultPartition::getId)
		.collect(Collectors.toSet());

	assertThat(firstPipelinedRegion.getConsumedResults().iterator().hasNext(), is(false));
	assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
 
Example #27
Source File: TaskDeploymentDescriptorFactory.java    From flink with Apache License 2.0 5 votes vote down vote up
public static TaskDeploymentDescriptorFactory fromExecutionVertex(
		ExecutionVertex executionVertex,
		int attemptNumber) throws IOException {
	ExecutionGraph executionGraph = executionVertex.getExecutionGraph();
	return new TaskDeploymentDescriptorFactory(
		executionVertex.getCurrentExecutionAttempt().getAttemptId(),
		attemptNumber,
		getSerializedJobInformation(executionGraph),
		getSerializedTaskInformation(executionVertex.getJobVertex().getTaskInformationOrBlobKey()),
		executionGraph.getJobID(),
		executionGraph.getScheduleMode().allowLazyDeployment(),
		executionVertex.getParallelSubtaskIndex(),
		executionVertex.getAllInputEdges());
}
 
Example #28
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() throws Exception {
	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(NoOpInvokable.class);
	source.setParallelism(10);

	final JobVertex target = new JobVertex("target");
	target.setInvokableClass(NoOpInvokable.class);
	target.setParallelism(10);

	target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final SlotSharingGroup sharingGroup = new SlotSharingGroup();
	source.setSlotSharingGroup(sharingGroup);
	target.setSlotSharingGroup(sharingGroup);

	source.setStrictlyCoLocatedWith(target);

	final JobGraph jobGraph = new JobGraph("test job", source, target);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion sourceRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
	FailoverRegion sourceRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[1]);
	FailoverRegion targetRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);
	FailoverRegion targetRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[1]);

	// we use 'assertTrue' here rather than 'assertEquals' because we want to test
	// for referential equality, to be on the safe side
	assertTrue(sourceRegion1 == sourceRegion2);
	assertTrue(sourceRegion2 == targetRegion1);
	assertTrue(targetRegion1 == targetRegion2);
}
 
Example #29
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testBlockingAllToAllTopologyWithCoLocation() throws Exception {
	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(NoOpInvokable.class);
	source.setParallelism(10);

	final JobVertex target = new JobVertex("target");
	target.setInvokableClass(NoOpInvokable.class);
	target.setParallelism(13);

	target.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final SlotSharingGroup sharingGroup = new SlotSharingGroup();
	source.setSlotSharingGroup(sharingGroup);
	target.setSlotSharingGroup(sharingGroup);

	source.setStrictlyCoLocatedWith(target);

	final JobGraph jobGraph = new JobGraph("test job", source, target);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);

	// we use 'assertTrue' here rather than 'assertEquals' because we want to test
	// for referential equality, to be on the safe side
	assertTrue(region1 == region2);
}
 
Example #30
Source File: TaskDeploymentDescriptorFactory.java    From flink with Apache License 2.0 5 votes vote down vote up
private static MaybeOffloaded<JobInformation> getSerializedJobInformation(ExecutionGraph executionGraph) {
	Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey =
		executionGraph.getJobInformationOrBlobKey();
	if (jobInformationOrBlobKey.isLeft()) {
		return new TaskDeploymentDescriptor.NonOffloaded<>(jobInformationOrBlobKey.left());
	} else {
		return new TaskDeploymentDescriptor.Offloaded<>(jobInformationOrBlobKey.right());
	}
}