org.apache.flink.runtime.executiongraph.ExecutionGraph Java Examples
The following examples show how to use
org.apache.flink.runtime.executiongraph.ExecutionGraph.
Each example lists the project, author, file, and license it was taken from, so you can locate the original source.
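Most of the snippets below follow the same basic pattern: build an ExecutionGraph, either through ExecutionGraphBuilder.buildGraph(...) or through the ExecutionGraphTestUtils test helpers, and then inspect or drive it with methods such as getAllExecutionVertices(), getCheckpointCoordinator(), failGlobal(), and suspend(). The following is a minimal sketch of that pattern, assembled only from calls that appear in the examples on this page; it is an illustration rather than a supported API contract, and exact signatures may differ between Flink versions.

// Minimal sketch, assuming the test utilities used in the examples below
// (ExecutionGraphTestUtils, getCheckpointCoordinator, failGlobal) are on the
// classpath. Signatures vary across Flink versions; treat this as illustration.
import org.apache.flink.runtime.checkpoint.CheckpointCoordinator;
import org.apache.flink.runtime.executiongraph.ExecutionGraph;
import org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils;
import org.apache.flink.runtime.jobgraph.JobVertex;

public class ExecutionGraphUsageSketch {

    public static void main(String[] args) throws Exception {
        // Build a simple test graph with one no-op vertex.
        JobVertex vertex = ExecutionGraphTestUtils.createNoOpVertex(1);
        ExecutionGraph graph = ExecutionGraphTestUtils.createSimpleTestGraph(vertex);

        // Inspect the graph: execution vertices and the (possibly null) checkpoint coordinator.
        graph.getAllExecutionVertices().forEach(ev ->
                System.out.println(ev.getTaskNameWithSubtaskIndex()));
        CheckpointCoordinator coordinator = graph.getCheckpointCoordinator();
        System.out.println("checkpointing enabled: " + (coordinator != null));

        // Transition the graph to a terminal state, as several tests below do.
        graph.failGlobal(new Exception("sketch: trigger global failure"));
    }
}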
Example #1
Source Project: Flink-CEPplus Author: ljygz File: JobMaster.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createExecutionGraph(JobManagerJobMetricGroup currentJobManagerJobMetricGroup) throws JobExecutionException, JobException {
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobMasterConfiguration.getConfiguration(),
        scheduledExecutorService,
        scheduledExecutorService,
        scheduler,
        userCodeLoader,
        highAvailabilityServices.getCheckpointRecoveryFactory(),
        rpcTimeout,
        restartStrategy,
        currentJobManagerJobMetricGroup,
        blobWriter,
        jobMasterConfiguration.getSlotRequestTimeout(),
        log);
}
Example #2
Source Project: flink Author: flink-tpc-ds File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
    // configure the pipelined failover strategy
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.setString(
        JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
        FailoverStrategyLoader.LEGACY_PIPELINED_REGION_RESTART_STRATEGY_NAME);

    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobManagerConfig,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        VoidBlobWriter.getInstance(),
        timeout,
        log,
        NettyShuffleMaster.INSTANCE,
        NoOpPartitionTracker.INSTANCE);
}
Example #3
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is failed.
 */
@Test
public void testShutdownCheckpointCoordinatorOnFailure() throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);

    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));

    graph.failGlobal(new Exception("Test Exception"));

    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.FAILED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.FAILED));
}
Example #4
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is suspended.
 */
@Test
public void testShutdownCheckpointCoordinatorOnSuspend() throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);

    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));

    graph.suspend(new Exception("Test Exception"));

    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.SUSPENDED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.SUSPENDED));
}
Example #5
Source Project: flink Author: apache File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is failed.
 */
@Test
public void testShutdownCheckpointCoordinatorOnFailure() throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);

    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));

    graph.failGlobal(new Exception("Test Exception"));

    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.FAILED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.FAILED));
}
Example #6
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createExecutionGraph(JobGraph jobGraph) throws JobException, JobExecutionException {
    // configure the pipelined failover strategy
    final Configuration jobManagerConfig = new Configuration();
    jobManagerConfig.setString(
        JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
        FailoverStrategyLoader.PIPELINED_REGION_RESTART_STRATEGY_NAME);

    final Time timeout = Time.seconds(10L);
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobManagerConfig,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        PipelinedFailoverRegionBuildingTest.class.getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        timeout,
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        1000,
        VoidBlobWriter.getInstance(),
        timeout,
        log);
}
Example #7
Source Project: flink Author: apache File: ExecutionGraphToInputsLocationsRetrieverAdapterTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that it can get the task manager location in an Execution.
 */
@Test
public void testGetTaskManagerLocationWhenScheduled() throws Exception {
    final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);

    final TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlotBuilder().createTestingLogicalSlot();

    final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(jobVertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
        new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);

    final ExecutionVertex onlyExecutionVertex = eg.getAllExecutionVertices().iterator().next();
    onlyExecutionVertex.deployToSlot(testingLogicalSlot);

    ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
    Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocationOptional =
        inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);

    assertTrue(taskManagerLocationOptional.isPresent());

    final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture = taskManagerLocationOptional.get();
    assertThat(taskManagerLocationFuture.get(), is(testingLogicalSlot.getTaskManagerLocation()));
}
Example #8
Source Project: flink Author: flink-tpc-ds File: LegacyScheduler.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createAndRestoreExecutionGraph(
        JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
        ShuffleMaster<?> shuffleMaster,
        PartitionTracker partitionTracker) throws Exception {

    ExecutionGraph newExecutionGraph = createExecutionGraph(currentJobManagerJobMetricGroup, shuffleMaster, partitionTracker);

    final CheckpointCoordinator checkpointCoordinator = newExecutionGraph.getCheckpointCoordinator();

    if (checkpointCoordinator != null) {
        // check whether we find a valid checkpoint
        if (!checkpointCoordinator.restoreLatestCheckpointedState(
                newExecutionGraph.getAllVertices(),
                false,
                false)) {

            // check whether we can restore from a savepoint
            tryRestoreExecutionGraphFromSavepoint(newExecutionGraph, jobGraph.getSavepointRestoreSettings());
        }
    }

    return newExecutionGraph;
}
Example #9
Source Project: flink Author: flink-tpc-ds File: LegacyScheduler.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createExecutionGraph(
        JobManagerJobMetricGroup currentJobManagerJobMetricGroup,
        ShuffleMaster<?> shuffleMaster,
        final PartitionTracker partitionTracker) throws JobExecutionException, JobException {
    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        jobMasterConfiguration,
        futureExecutor,
        ioExecutor,
        slotProvider,
        userCodeLoader,
        checkpointRecoveryFactory,
        rpcTimeout,
        restartStrategy,
        currentJobManagerJobMetricGroup,
        blobWriter,
        slotRequestTimeout,
        log,
        shuffleMaster,
        partitionTracker);
}
Example #10
Source Project: flink Author: flink-tpc-ds File: DefaultFailoverTopology.java License: Apache License 2.0 | 6 votes |
public DefaultFailoverTopology(ExecutionGraph executionGraph) {
    checkNotNull(executionGraph);

    this.containsCoLocationConstraints = executionGraph.getAllVertices().values().stream()
        .map(ExecutionJobVertex::getCoLocationGroup)
        .anyMatch(Objects::nonNull);

    // generate vertices
    this.failoverVertices = new ArrayList<>();
    final Map<ExecutionVertex, DefaultFailoverVertex> failoverVertexMap = new IdentityHashMap<>();
    for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
        final DefaultFailoverVertex failoverVertex = new DefaultFailoverVertex(
            new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex()),
            vertex.getTaskNameWithSubtaskIndex());
        this.failoverVertices.add(failoverVertex);

        failoverVertexMap.put(vertex, failoverVertex);
    }

    // generate edges
    connectVerticesWithEdges(failoverVertexMap);
}
Example #11
Source Project: flink Author: flink-tpc-ds File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is suspended.
 */
@Test
public void testShutdownCheckpointCoordinatorOnSuspend() throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);

    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));

    graph.suspend(new Exception("Test Exception"));

    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.SUSPENDED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.SUSPENDED));
}
Example #12
Source Project: flink Author: apache File: DefaultExecutionTopologyTest.java License: Apache License 2.0 | 6 votes |
private ExecutionGraph createExecutionGraphWithCoLocationConstraint() throws Exception {
    JobVertex[] jobVertices = new JobVertex[2];
    int parallelism = 3;
    jobVertices[0] = createNoOpVertex("v1", parallelism);
    jobVertices[1] = createNoOpVertex("v2", parallelism);
    jobVertices[1].connectNewDataSetAsInput(jobVertices[0], ALL_TO_ALL, PIPELINED);

    SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
    jobVertices[0].setSlotSharingGroup(slotSharingGroup);
    jobVertices[1].setSlotSharingGroup(slotSharingGroup);

    CoLocationGroup coLocationGroup = new CoLocationGroup();
    coLocationGroup.addVertex(jobVertices[0]);
    coLocationGroup.addVertex(jobVertices[1]);
    jobVertices[0].updateCoLocationGroup(coLocationGroup);
    jobVertices[1].updateCoLocationGroup(coLocationGroup);

    return createSimpleTestGraph(
        taskManagerGateway,
        triggeredRestartStrategy,
        jobVertices);
}
Example #13
Source Project: flink Author: flink-tpc-ds File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 6 votes |
/**
 * Validates that a graph with single unconnected vertices works correctly.
 *
 * <pre>
 *     (v1)
 *
 *     (v2)
 *
 *     (v3)
 *
 *     ...
 * </pre>
 */
@Test
public void testIndividualVertices() throws Exception {
    final JobVertex source1 = new JobVertex("source1");
    source1.setInvokableClass(NoOpInvokable.class);
    source1.setParallelism(2);

    final JobVertex source2 = new JobVertex("source2");
    source2.setInvokableClass(NoOpInvokable.class);
    source2.setParallelism(2);

    final JobGraph jobGraph = new JobGraph("test job", source1, source2);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion sourceRegion11 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source1.getID()).getTaskVertices()[0]);
    FailoverRegion sourceRegion12 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source1.getID()).getTaskVertices()[1]);
    FailoverRegion targetRegion21 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source2.getID()).getTaskVertices()[0]);
    FailoverRegion targetRegion22 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source2.getID()).getTaskVertices()[1]);

    assertTrue(sourceRegion11 != sourceRegion12);
    assertTrue(sourceRegion12 != targetRegion21);
    assertTrue(targetRegion21 != targetRegion22);
}
Example #14
Source Project: Flink-CEPplus Author: ljygz File: JobMaster.java License: Apache License 2.0 | 5 votes |
private void assignExecutionGraph(
        ExecutionGraph newExecutionGraph,
        JobManagerJobMetricGroup newJobManagerJobMetricGroup) {
    validateRunsInMainThread();
    checkState(executionGraph.getState().isTerminalState());
    checkState(jobManagerJobMetricGroup == null);

    executionGraph = newExecutionGraph;
    jobManagerJobMetricGroup = newJobManagerJobMetricGroup;
}
Example #15
Source Project: Flink-CEPplus Author: ljygz File: JobMaster.java License: Apache License 2.0 | 5 votes |
/**
 * Tries to restore the given {@link ExecutionGraph} from the provided {@link SavepointRestoreSettings}.
 *
 * @param executionGraphToRestore {@link ExecutionGraph} which is supposed to be restored
 * @param savepointRestoreSettings {@link SavepointRestoreSettings} containing information about the savepoint to restore from
 * @throws Exception if the {@link ExecutionGraph} could not be restored
 */
private void tryRestoreExecutionGraphFromSavepoint(ExecutionGraph executionGraphToRestore, SavepointRestoreSettings savepointRestoreSettings) throws Exception {
    if (savepointRestoreSettings.restoreSavepoint()) {
        final CheckpointCoordinator checkpointCoordinator = executionGraphToRestore.getCheckpointCoordinator();
        if (checkpointCoordinator != null) {
            checkpointCoordinator.restoreSavepoint(
                savepointRestoreSettings.getRestorePath(),
                savepointRestoreSettings.allowNonRestoredState(),
                executionGraphToRestore.getAllVertices(),
                userCodeLoader);
        }
    }
}
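The restore path above is only taken when the JobGraph actually carries savepoint restore settings. As a hypothetical illustration, not part of the examples on this page, such settings could be attached to a job as sketched below; the class and method names come from Flink's public JobGraph and SavepointRestoreSettings APIs and may differ between versions, and the savepoint path is a made-up placeholder.

// Hypothetical caller sketch: attach savepoint restore settings to a JobGraph
// so that the restore logic shown in Examples #8 and #15 is exercised.
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;

public class SavepointRestoreSketch {

    static void configureRestore(JobGraph jobGraph) {
        // Restore from a savepoint path and allow state that no longer maps to any operator.
        // The path below is a placeholder, not a real savepoint.
        SavepointRestoreSettings settings =
            SavepointRestoreSettings.forPath("hdfs:///flink/savepoints/savepoint-123", true);
        jobGraph.setSavepointRestoreSettings(settings);

        // The JobMaster / scheduler later reads these settings via
        // jobGraph.getSavepointRestoreSettings() and passes them to
        // tryRestoreExecutionGraphFromSavepoint(...) as shown above.
    }
}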
Example #16
Source Project: flink Author: flink-tpc-ds File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testBlockingAllToAllTopologyWithCoLocation() throws Exception {
    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(NoOpInvokable.class);
    source.setParallelism(10);

    final JobVertex target = new JobVertex("target");
    target.setInvokableClass(NoOpInvokable.class);
    target.setParallelism(13);

    target.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    final SlotSharingGroup sharingGroup = new SlotSharingGroup();
    source.setSlotSharingGroup(sharingGroup);
    target.setSlotSharingGroup(sharingGroup);

    source.setStrictlyCoLocatedWith(target);

    final JobGraph jobGraph = new JobGraph("test job", source, target);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
    FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);

    // we use 'assertTrue' here rather than 'assertEquals' because we want to test
    // for referential equality, to be on the safe side
    assertTrue(region1 == region2);
}
Example #17
Source Project: flink Author: flink-tpc-ds File: DefaultFailoverTopologyTest.java License: Apache License 2.0 | 5 votes |
private ExecutionGraph createExecutionGraph(boolean addCollocationConstraints) throws Exception {
    JobVertex[] jobVertices = new JobVertex[3];
    int parallelism = 3;
    jobVertices[0] = createNoOpVertex("v1", parallelism);
    jobVertices[1] = createNoOpVertex("v2", parallelism);
    jobVertices[2] = createNoOpVertex("v3", parallelism);
    jobVertices[1].connectNewDataSetAsInput(jobVertices[0], ALL_TO_ALL, BLOCKING);
    jobVertices[2].connectNewDataSetAsInput(jobVertices[1], POINTWISE, PIPELINED);

    if (addCollocationConstraints) {
        SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
        jobVertices[1].setSlotSharingGroup(slotSharingGroup);
        jobVertices[2].setSlotSharingGroup(slotSharingGroup);

        CoLocationGroup coLocationGroup = new CoLocationGroup();
        coLocationGroup.addVertex(jobVertices[1]);
        coLocationGroup.addVertex(jobVertices[2]);
        jobVertices[1].updateCoLocationGroup(coLocationGroup);
        jobVertices[2].updateCoLocationGroup(coLocationGroup);
    }

    return createSimpleTestGraph(
        new JobID(),
        taskManagerGateway,
        triggeredRestartStrategy,
        jobVertices);
}
Example #18
Source Project: Flink-CEPplus Author: ljygz File: FailoverRegion.java License: Apache License 2.0 | 5 votes |
public FailoverRegion(
        ExecutionGraph executionGraph,
        List<ExecutionVertex> connectedExecutions) {
    this.executionGraph = checkNotNull(executionGraph);
    this.connectedExecutionVertexes = checkNotNull(connectedExecutions);

    LOG.debug("Created failover region {} with vertices: {}", id, connectedExecutions);
}
Example #19
Source Project: flink Author: flink-tpc-ds File: DefaultFailoverTopologyTest.java License: Apache License 2.0 | 5 votes |
/**
 * Tests the case that the graph has no collocation constraint.
 */
@Test
public void testWithoutCollocationConstraints() throws Exception {
    ExecutionGraph executionGraph = createExecutionGraph(false);
    DefaultFailoverTopology adapter = new DefaultFailoverTopology(executionGraph);

    assertFalse(adapter.containsCoLocationConstraints());
}
Example #20
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 5 votes |
/**
 * Tests that the checkpoint coordinator is shut down if the execution graph
 * is finished.
 */
@Test
public void testShutdownCheckpointCoordinatorOnFinished() throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter = new TestingCheckpointIDCounter(counterShutdownFuture);

    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store = new TestingCompletedCheckpointStore(storeShutdownFuture);

    ExecutionGraph graph = createExecutionGraphAndEnableCheckpointing(counter, store);

    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();

    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));

    graph.scheduleForExecution();

    for (ExecutionVertex executionVertex : graph.getAllExecutionVertices()) {
        final Execution currentExecutionAttempt = executionVertex.getCurrentExecutionAttempt();
        graph.updateState(new TaskExecutionState(graph.getJobID(), currentExecutionAttempt.getAttemptId(), ExecutionState.FINISHED));
    }

    assertThat(graph.getTerminationFuture().get(), is(JobStatus.FINISHED));

    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.FINISHED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.FINISHED));
}
Example #21
Source Project: Flink-CEPplus Author: ljygz File: ExecutionGraphCheckpointCoordinatorTest.java License: Apache License 2.0 | 5 votes |
private ExecutionGraph createExecutionGraphAndEnableCheckpointing(
        CheckpointIDCounter counter,
        CompletedCheckpointStore store) throws Exception {
    final Time timeout = Time.days(1L);
    ExecutionGraph executionGraph = new ExecutionGraph(
        new DummyJobInformation(),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        timeout,
        new NoRestartStrategy(),
        new RestartAllStrategy.Factory(),
        new TestingSlotProvider(slotRequestId -> CompletableFuture.completedFuture(new TestingLogicalSlot())),
        ClassLoader.getSystemClassLoader(),
        VoidBlobWriter.getInstance(),
        timeout);

    executionGraph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

    executionGraph.enableCheckpointing(
        100,
        100,
        100,
        1,
        CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
        Collections.emptyList(),
        Collections.emptyList(),
        Collections.emptyList(),
        Collections.emptyList(),
        counter,
        store,
        new MemoryStateBackend(),
        CheckpointStatsTrackerTest.createTestTracker());

    JobVertex jobVertex = new JobVertex("MockVertex");
    jobVertex.setInvokableClass(AbstractInvokable.class);
    executionGraph.attachJobGraph(Collections.singletonList(jobVertex));
    executionGraph.setQueuedSchedulingAllowed(true);

    return executionGraph;
}
Example #22
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * Validates that a single pipelined component built via a sequence of all-to-all
 * connections works correctly.
 *
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1)
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *     ...
 * </pre>
 */
@Test
public void testOneComponentViaTwoExchanges() throws Exception {
    final JobVertex vertex1 = new JobVertex("vertex1");
    vertex1.setInvokableClass(NoOpInvokable.class);
    vertex1.setParallelism(3);

    final JobVertex vertex2 = new JobVertex("vertex2");
    vertex2.setInvokableClass(NoOpInvokable.class);
    vertex2.setParallelism(5);

    final JobVertex vertex3 = new JobVertex("vertex3");
    vertex3.setInvokableClass(NoOpInvokable.class);
    vertex3.setParallelism(2);

    vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
    FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[4]);
    FailoverRegion region3 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);

    assertTrue(region1 == region2);
    assertTrue(region2 == region3);
}
Example #23
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1)
 *           X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange() throws Exception {
    final JobVertex vertex1 = new JobVertex("vertex1");
    vertex1.setInvokableClass(NoOpInvokable.class);
    vertex1.setParallelism(3);

    final JobVertex vertex2 = new JobVertex("vertex2");
    vertex2.setInvokableClass(NoOpInvokable.class);
    vertex2.setParallelism(2);

    final JobVertex vertex3 = new JobVertex("vertex3");
    vertex3.setInvokableClass(NoOpInvokable.class);
    vertex3.setParallelism(2);

    vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
    FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
    FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
    FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

    assertTrue(region1 == region2);
    assertTrue(region2 != region31);
    assertTrue(region32 != region31);
}
Example #24
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1)
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange2() throws Exception {
    final JobVertex vertex1 = new JobVertex("vertex1");
    vertex1.setInvokableClass(NoOpInvokable.class);
    vertex1.setParallelism(3);

    final JobVertex vertex2 = new JobVertex("vertex2");
    vertex2.setInvokableClass(NoOpInvokable.class);
    vertex2.setParallelism(2);

    final JobVertex vertex3 = new JobVertex("vertex3");
    vertex3.setInvokableClass(NoOpInvokable.class);
    vertex3.setParallelism(2);

    vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
    FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
    FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
    FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

    assertTrue(region1 == region2);
    assertTrue(region2 != region31);
    assertTrue(region32 != region31);
}
Example #25
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
@Test
public void testDiamondWithMixedPipelinedAndBlockingExchanges() throws Exception {
    final JobVertex vertex1 = new JobVertex("vertex1");
    vertex1.setInvokableClass(NoOpInvokable.class);
    vertex1.setParallelism(8);

    final JobVertex vertex2 = new JobVertex("vertex2");
    vertex2.setInvokableClass(NoOpInvokable.class);
    vertex2.setParallelism(8);

    final JobVertex vertex3 = new JobVertex("vertex3");
    vertex3.setInvokableClass(NoOpInvokable.class);
    vertex3.setParallelism(8);

    final JobVertex vertex4 = new JobVertex("vertex4");
    vertex4.setInvokableClass(NoOpInvokable.class);
    vertex4.setParallelism(8);

    vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    vertex4.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3, vertex4);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

    Iterator<ExecutionVertex> evs = eg.getAllExecutionVertices().iterator();

    FailoverRegion preRegion = failoverStrategy.getFailoverRegion(evs.next());

    while (evs.hasNext()) {
        FailoverRegion region = failoverStrategy.getFailoverRegion(evs.next());
        assertTrue(preRegion == region);
    }
}
Example #26
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testBlockingAllToAllTopologyWithCoLocation() throws Exception {
    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(NoOpInvokable.class);
    source.setParallelism(10);

    final JobVertex target = new JobVertex("target");
    target.setInvokableClass(NoOpInvokable.class);
    target.setParallelism(13);

    target.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    final SlotSharingGroup sharingGroup = new SlotSharingGroup();
    source.setSlotSharingGroup(sharingGroup);
    target.setSlotSharingGroup(sharingGroup);

    source.setStrictlyCoLocatedWith(target);

    final JobGraph jobGraph = new JobGraph("test job", source, target);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
    FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);

    // we use 'assertTrue' here rather than 'assertEquals' because we want to test
    // for referential equality, to be on the safe side
    assertTrue(region1 == region2);
}
Example #27
Source Project: Flink-CEPplus Author: ljygz File: PipelinedFailoverRegionBuildingTest.java License: Apache License 2.0 | 5 votes |
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() throws Exception {
    final JobVertex source = new JobVertex("source");
    source.setInvokableClass(NoOpInvokable.class);
    source.setParallelism(10);

    final JobVertex target = new JobVertex("target");
    target.setInvokableClass(NoOpInvokable.class);
    target.setParallelism(10);

    target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final SlotSharingGroup sharingGroup = new SlotSharingGroup();
    source.setSlotSharingGroup(sharingGroup);
    target.setSlotSharingGroup(sharingGroup);

    source.setStrictlyCoLocatedWith(target);

    final JobGraph jobGraph = new JobGraph("test job", source, target);
    final ExecutionGraph eg = createExecutionGraph(jobGraph);

    RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
    FailoverRegion sourceRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
    FailoverRegion sourceRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[1]);
    FailoverRegion targetRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);
    FailoverRegion targetRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[1]);

    // we use 'assertTrue' here rather than 'assertEquals' because we want to test
    // for referential equality, to be on the safe side
    assertTrue(sourceRegion1 == sourceRegion2);
    assertTrue(sourceRegion2 == targetRegion1);
    assertTrue(targetRegion1 == targetRegion2);
}
Example #28
Source Project: flink Author: flink-tpc-ds File: TaskDeploymentDescriptorFactory.java License: Apache License 2.0 | 5 votes |
public static TaskDeploymentDescriptorFactory fromExecutionVertex(
        ExecutionVertex executionVertex,
        int attemptNumber) throws IOException {
    ExecutionGraph executionGraph = executionVertex.getExecutionGraph();
    return new TaskDeploymentDescriptorFactory(
        executionVertex.getCurrentExecutionAttempt().getAttemptId(),
        attemptNumber,
        getSerializedJobInformation(executionGraph),
        getSerializedTaskInformation(executionVertex.getJobVertex().getTaskInformationOrBlobKey()),
        executionGraph.getJobID(),
        executionGraph.getScheduleMode().allowLazyDeployment(),
        executionVertex.getParallelSubtaskIndex(),
        executionVertex.getAllInputEdges());
}
Example #29
Source Project: flink Author: flink-tpc-ds File: TaskDeploymentDescriptorFactory.java License: Apache License 2.0 | 5 votes |
private static MaybeOffloaded<JobInformation> getSerializedJobInformation(ExecutionGraph executionGraph) {
    Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey =
        executionGraph.getJobInformationOrBlobKey();
    if (jobInformationOrBlobKey.isLeft()) {
        return new TaskDeploymentDescriptor.NonOffloaded<>(jobInformationOrBlobKey.left());
    } else {
        return new TaskDeploymentDescriptor.Offloaded<>(jobInformationOrBlobKey.right());
    }
}
Example #30
Source Project: flink Author: apache File: DefaultSchedulingPipelinedRegionTest.java License: Apache License 2.0 | 5 votes |
/**
 * Tests if the consumed inputs of the pipelined regions are computed
 * correctly using the Job graph below.
 * <pre>
 *          c
 *        /  X
 * a -+- b    e
 *        \  /
 *          d
 * </pre>
 * Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

    b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final ExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
    final DefaultExecutionTopology topology = new DefaultExecutionTopology(simpleTestGraph);

    final DefaultSchedulingPipelinedRegion firstPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

    final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition = Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = IterableUtils.toStream(secondPipelinedRegion.getConsumedResults())
        .map(DefaultResultPartition::getId)
        .collect(Collectors.toSet());

    assertThat(firstPipelinedRegion.getConsumedResults().iterator().hasNext(), is(false));
    assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}