Java Code Examples for org.apache.flink.runtime.jobgraph.JobVertex#setParallelism()

The following examples show how to use org.apache.flink.runtime.jobgraph.JobVertex#setParallelism(). They are extracted from open source projects; follow the link above each example to view the original project or source file.
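The examples below share a common pattern: create a JobVertex, set its invokable class, call setParallelism() (optionally bounding it with setMaxParallelism()), connect downstream vertices with connectNewDataSetAsInput(), and assemble the vertices into a JobGraph. The following condensed sketch illustrates that pattern only; it is not taken from any of the projects below, the class and method names (SetParallelismSketch, buildTwoVertexJob) are made up for illustration, and the import paths assume the Flink runtime versions that these examples target.

import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.DistributionPattern;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.testtasks.NoOpInvokable;

public class SetParallelismSketch {

	public static JobGraph buildTwoVertexJob(int parallelism) {
		// Source vertex: the invokable class is the task code the TaskManagers run.
		JobVertex source = new JobVertex("source");
		source.setInvokableClass(NoOpInvokable.class);
		source.setParallelism(parallelism);         // number of parallel subtasks
		source.setMaxParallelism(parallelism * 4);  // optional upper bound on parallelism

		// Sink vertex with the same parallelism.
		JobVertex sink = new JobVertex("sink");
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(parallelism);

		// Wire the sink to the source with a pointwise, pipelined edge.
		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

		// Bundle both vertices into a job graph that can be submitted to a (mini) cluster.
		return new JobGraph("setParallelism sketch", source, sink);
	}

	public static void main(String[] args) {
		JobGraph jobGraph = buildTwoVertexJob(4);
		System.out.println(jobGraph.getNumberOfVertices() + " vertices in job '" + jobGraph.getName() + "'");
	}
}

As the examples show, parallelism is usually chosen per vertex (senders and receivers may differ), while max parallelism, when set, must not be smaller than the parallelism.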
Example 1
Source File: ExecutionJobVertexTest.java    From Flink-CEPplus with Apache License 2.0
private static ExecutionJobVertex createExecutionJobVertex(
		int parallelism,
		int preconfiguredMaxParallelism) throws JobException {

	JobVertex jobVertex = new JobVertex("testVertex");
	jobVertex.setInvokableClass(AbstractInvokable.class);
	jobVertex.setParallelism(parallelism);

	if (NOT_CONFIGURED != preconfiguredMaxParallelism) {
		jobVertex.setMaxParallelism(preconfiguredMaxParallelism);
	}

	ExecutionGraph executionGraphMock = mock(ExecutionGraph.class);
	when(executionGraphMock.getFutureExecutor()).thenReturn(Executors.directExecutor());
	ExecutionJobVertex executionJobVertex =
			new ExecutionJobVertex(executionGraphMock, jobVertex, 1, Time.seconds(10));

	return executionJobVertex;
}
 
Example 2
Source File: CoLocationConstraintTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCreateConstraints() {
	try {
		JobVertexID id1 = new JobVertexID();
		JobVertexID id2 = new JobVertexID();

		JobVertex vertex1 = new JobVertex("vertex1", id1);
		vertex1.setParallelism(2);
		
		JobVertex vertex2 = new JobVertex("vertex2", id2);
		vertex2.setParallelism(3);
		
		CoLocationGroup group = new CoLocationGroup(vertex1, vertex2);
		
		AbstractID groupId = group.getId();
		assertNotNull(groupId);
		
		CoLocationConstraint constraint1 = group.getLocationConstraint(0);
		CoLocationConstraint constraint2 = group.getLocationConstraint(1);
		CoLocationConstraint constraint3 = group.getLocationConstraint(2);
		
		assertFalse(constraint1 == constraint2);
		assertFalse(constraint1 == constraint3);
		assertFalse(constraint2 == constraint3);
		
		assertEquals(groupId, constraint1.getGroupId());
		assertEquals(groupId, constraint2.getGroupId());
		assertEquals(groupId, constraint3.getGroupId());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 3
Source File: JobMasterTest.java    From flink with Apache License 2.0
@Nonnull
public JobGraph createKvJobGraph() {
	final JobVertex vertex1 = new JobVertex("v1");
	vertex1.setParallelism(4);
	vertex1.setMaxParallelism(16);
	vertex1.setInvokableClass(BlockingNoOpInvokable.class);

	final JobVertex vertex2 = new JobVertex("v2");
	vertex2.setParallelism(4);
	vertex2.setMaxParallelism(16);
	vertex2.setInvokableClass(BlockingNoOpInvokable.class);

	return new JobGraph(vertex1, vertex2);
}
 
Example 4
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
@Test
public void testCallFinalizeOnMasterBeforeJobCompletes() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex source = new JobVertex("Source");
		source.setInvokableClass(WaitingNoOpInvokable.class);
		source.setParallelism(parallelism);

		final WaitOnFinalizeJobVertex sink = new WaitOnFinalizeJobVertex("Sink", 20L);
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(parallelism);

		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("SubtaskInFinalStateRaceCondition", source, sink);

		final CompletableFuture<JobSubmissionResult> submissionFuture = miniCluster.submitJob(jobGraph);

		final CompletableFuture<JobResult> jobResultFuture = submissionFuture.thenCompose(
			(JobSubmissionResult ignored) -> miniCluster.requestJobResult(jobGraph.getJobID()));

		jobResultFuture.get().toJobExecutionResult(getClass().getClassLoader());

		assertTrue(sink.finalizedOnMaster.get());
	}
}
 
Example 5
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that a single pipelined component built via a sequence of all-to-all
 * connections forms a single failover region.
 * 
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *     ...
 * </pre>
 */
@Test
public void testOneComponentViaTwoExchanges() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(5);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[4]);
	FailoverRegion region3 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);

	assertTrue(region1 == region2);
	assertTrue(region2 == region3);
}
 
Example 6
Source File: ExecutionGraphSchedulingTest.java    From flink with Apache License 2.0
/**
 * Tests that an ongoing scheduling operation does not fail the {@link ExecutionGraph}
 * if it gets concurrently cancelled.
 */
@Test
public void testSchedulingOperationCancellationWhenCancel() throws Exception {
	final JobVertex jobVertex = new JobVertex("NoOp JobVertex");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	jobVertex.setParallelism(2);
	final JobGraph jobGraph = new JobGraph(jobVertex);
	jobGraph.setScheduleMode(ScheduleMode.EAGER);
	jobGraph.setAllowQueuedScheduling(true);

	final CompletableFuture<LogicalSlot> slotFuture1 = new CompletableFuture<>();
	final CompletableFuture<LogicalSlot> slotFuture2 = new CompletableFuture<>();
	final ProgrammedSlotProvider slotProvider = new ProgrammedSlotProvider(2);
	slotProvider.addSlots(jobVertex.getID(), new CompletableFuture[]{slotFuture1, slotFuture2});
	final ExecutionGraph executionGraph = createExecutionGraph(jobGraph, slotProvider);

	executionGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
	executionGraph.scheduleForExecution();

	final TestingLogicalSlot slot = createTestingSlot();
	final CompletableFuture<?> releaseFuture = slot.getReleaseFuture();
	slotFuture1.complete(slot);

	// cancel should change the state of all executions to CANCELLED
	executionGraph.cancel();

	// complete the now CANCELLED execution --> this should cause a failure
	slotFuture2.complete(new TestingLogicalSlotBuilder().createTestingLogicalSlot());

	Thread.sleep(1L);
	// release the first slot to finish the cancellation
	releaseFuture.complete(null);

	// NOTE: This test will only occasionally fail without the fix since there is
	// a race between the releaseFuture and the slotFuture2
	assertThat(executionGraph.getTerminationFuture().get(), is(JobStatus.CANCELED));
}
 
Example 7
Source File: ExecutionGraphSchedulingTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that a partially completed eager scheduling operation fails if a
 * completed slot is released. See FLINK-9099.
 */
@Test
public void testSlotReleasingFailsSchedulingOperation() throws Exception {
	final int parallelism = 2;

	final JobVertex jobVertex = new JobVertex("Testing job vertex");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	jobVertex.setParallelism(parallelism);
	final JobGraph jobGraph = new JobGraph(jobVertex);
	jobGraph.setAllowQueuedScheduling(true);
	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	final ProgrammedSlotProvider slotProvider = new ProgrammedSlotProvider(parallelism);

	final SimpleSlot slot = createSlot(new SimpleAckingTaskManagerGateway(), jobGraph.getJobID(), new DummySlotOwner());
	slotProvider.addSlot(jobVertex.getID(), 0, CompletableFuture.completedFuture(slot));

	final CompletableFuture<LogicalSlot> slotFuture = new CompletableFuture<>();
	slotProvider.addSlot(jobVertex.getID(), 1, slotFuture);

	final ExecutionGraph executionGraph = createExecutionGraph(jobGraph, slotProvider);

	executionGraph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());
	executionGraph.scheduleForExecution();

	assertThat(executionGraph.getState(), is(JobStatus.RUNNING));

	final ExecutionJobVertex executionJobVertex = executionGraph.getJobVertex(jobVertex.getID());
	final ExecutionVertex[] taskVertices = executionJobVertex.getTaskVertices();
	assertThat(taskVertices[0].getExecutionState(), is(ExecutionState.SCHEDULED));
	assertThat(taskVertices[1].getExecutionState(), is(ExecutionState.SCHEDULED));

	// fail the single allocated slot --> this should fail the scheduling operation
	slot.releaseSlot(new FlinkException("Test failure"));

	assertThat(executionGraph.getTerminationFuture().get(), is(JobStatus.FAILED));
}
 
Example 8
Source File: AdaptedRestartPipelinedRegionStrategyNGConcurrentFailoverTest.java    From flink with Apache License 2.0
/**
 * Creates a sample ExecutionGraph for testing, with the topology shown below.
 * <pre>
 *     (v11) -+-> (v21)
 *            x
 *     (v12) -+-> (v22)
 *
 *            ^
 *            |
 *       (blocking)
 * </pre>
 * 4 regions, each consisting of one individual execution vertex.
 */
private ExecutionGraph createExecutionGraph() throws Exception {

	final JobVertex v1 = new JobVertex("vertex1");
	v1.setInvokableClass(NoOpInvokable.class);
	v1.setParallelism(DEFAULT_PARALLELISM);

	final JobVertex v2 = new JobVertex("vertex2");
	v2.setInvokableClass(NoOpInvokable.class);
	v2.setParallelism(DEFAULT_PARALLELISM);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jg = new JobGraph(TEST_JOB_ID, "testjob", v1, v2);

	final SimpleSlotProvider slotProvider = new SimpleSlotProvider(TEST_JOB_ID, DEFAULT_PARALLELISM);

	final PartitionTracker partitionTracker = new PartitionTrackerImpl(
		jg.getJobID(),
		NettyShuffleMaster.INSTANCE,
		ignored -> Optional.empty());

	final ExecutionGraph graph = new ExecutionGraphTestUtils.TestingExecutionGraphBuilder(jg)
		.setRestartStrategy(manuallyTriggeredRestartStrategy)
		.setFailoverStrategyFactory(TestAdaptedRestartPipelinedRegionStrategyNG::new)
		.setSlotProvider(slotProvider)
		.setPartitionTracker(partitionTracker)
		.build();

	graph.start(componentMainThreadExecutor);

	return graph;
}
 
Example 9
Source File: MiniClusterITCase.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testTwoInputJob() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(6 * parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender1 = new JobVertex("Sender1");
		sender1.setInvokableClass(Sender.class);
		sender1.setParallelism(parallelism);

		final JobVertex sender2 = new JobVertex("Sender2");
		sender2.setInvokableClass(Sender.class);
		sender2.setParallelism(2 * parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(AgnosticBinaryReceiver.class);
		receiver.setParallelism(3 * parallelism);

		receiver.connectNewDataSetAsInput(sender1, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);
		receiver.connectNewDataSetAsInput(sender2, DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Bipartite Job", sender1, receiver, sender2);

		miniCluster.executeJobBlocking(jobGraph);
	}
}
 
Example 10
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
@Test
public void testJobWithAllVerticesFailingDuringInstantiation() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(InstantiationErrorSender.class);
		sender.setParallelism(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception in constructor").isPresent());
		}
	}
}
 
Example 11
Source File: PointwisePatternTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void test2NToN() throws Exception {
	final int N = 17;
	
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(2 * N);
	v2.setParallelism(N);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

	ExecutionGraph eg = getDummyExecutionGraph();
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	
	ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());
	
	for (ExecutionVertex ev : target.getTaskVertices()) {
		assertEquals(1, ev.getNumberOfInputs());
		
		ExecutionEdge[] inEdges = ev.getInputEdges(0);
		assertEquals(2, inEdges.length);
		
		assertEquals(ev.getParallelSubtaskIndex() * 2, inEdges[0].getSource().getPartitionNumber());
		assertEquals(ev.getParallelSubtaskIndex() * 2 + 1, inEdges[1].getSource().getPartitionNumber());
	}
}
 
Example 12
Source File: OperatorCoordinatorSchedulerTest.java    From flink with Apache License 2.0
private DefaultScheduler setupTestJobAndScheduler(
		OperatorCoordinator.Provider provider,
		@Nullable TaskExecutorOperatorEventGateway taskExecutorOperatorEventGateway,
		@Nullable Consumer<JobGraph> jobGraphPreProcessing,
		boolean restartAllOnFailover) throws Exception {

	final OperatorIDPair opIds = OperatorIDPair.of(new OperatorID(), provider.getOperatorId());
	final JobVertex vertex = new JobVertex("Vertex with OperatorCoordinator", testVertexId, Collections.singletonList(opIds));
	vertex.setInvokableClass(NoOpInvokable.class);
	vertex.addOperatorCoordinator(new SerializedValue<>(provider));
	vertex.setParallelism(2);

	final JobGraph jobGraph = new JobGraph("test job with OperatorCoordinator", vertex);
	SchedulerTestingUtils.enableCheckpointing(jobGraph);
	if (jobGraphPreProcessing != null) {
		jobGraphPreProcessing.accept(jobGraph);
	}

	final SchedulerTestingUtils.DefaultSchedulerBuilder schedulerBuilder = taskExecutorOperatorEventGateway == null
			? SchedulerTestingUtils.createSchedulerBuilder(jobGraph, executor)
			: SchedulerTestingUtils.createSchedulerBuilder(jobGraph, executor, taskExecutorOperatorEventGateway);
	if (restartAllOnFailover) {
		schedulerBuilder.setFailoverStrategyFactory(new RestartAllFailoverStrategy.Factory());
	}

	final DefaultScheduler scheduler = schedulerBuilder.build();

	final ComponentMainThreadExecutor mainThreadExecutor = new ComponentMainThreadExecutorServiceAdapter(
		(ScheduledExecutorService) executor, Thread.currentThread());
	scheduler.setMainThreadExecutor(mainThreadExecutor);

	this.createdScheduler = scheduler;
	return scheduler;
}
 
Example 13
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() throws Exception {
	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(NoOpInvokable.class);
	source.setParallelism(10);

	final JobVertex target = new JobVertex("target");
	target.setInvokableClass(NoOpInvokable.class);
	target.setParallelism(10);

	target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final SlotSharingGroup sharingGroup = new SlotSharingGroup();
	source.setSlotSharingGroup(sharingGroup);
	target.setSlotSharingGroup(sharingGroup);

	source.setStrictlyCoLocatedWith(target);

	final JobGraph jobGraph = new JobGraph("test job", source, target);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion sourceRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
	FailoverRegion sourceRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[1]);
	FailoverRegion targetRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);
	FailoverRegion targetRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[1]);

	// we use 'assertTrue' here rather than 'assertEquals' because we want to test
	// for referential equality, to be on the safe side
	assertTrue(sourceRegion1 == sourceRegion2);
	assertTrue(sourceRegion2 == targetRegion1);
	assertTrue(targetRegion1 == targetRegion2);
}
 
Example 14
Source File: PointwisePatternTest.java    From flink with Apache License 2.0
private void testHighToLow(int highDop, int lowDop) throws Exception {
	if (highDop < lowDop) {
		throw new IllegalArgumentException();
	}
	
	final int factor = highDop / lowDop;
	final int delta = highDop % lowDop == 0 ? 0 : 1;
	
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(highDop);
	v2.setParallelism(lowDop);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

	ExecutionGraph eg = getDummyExecutionGraph();
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	
	ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());
	
	int[] timesUsed = new int[highDop];
	
	for (ExecutionVertex ev : target.getTaskVertices()) {
		assertEquals(1, ev.getNumberOfInputs());
		
		ExecutionEdge[] inEdges = ev.getInputEdges(0);
		assertTrue(inEdges.length >= factor && inEdges.length <= factor + delta);
		
		for (ExecutionEdge ee : inEdges) {
			timesUsed[ee.getSource().getPartitionNumber()]++;
		}
	}

	for (int used : timesUsed) {
		assertEquals(1, used);
	}
}
 
Example 15
Source File: VertexSlotSharingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testAssignSlotSharingGroup() {
	try {
		JobVertex v1 = new JobVertex("v1");
		JobVertex v2 = new JobVertex("v2");
		JobVertex v3 = new JobVertex("v3");
		JobVertex v4 = new JobVertex("v4");
		JobVertex v5 = new JobVertex("v5");
		
		v1.setParallelism(4);
		v2.setParallelism(5);
		v3.setParallelism(7);
		v4.setParallelism(1);
		v5.setParallelism(11);

		v1.setInvokableClass(AbstractInvokable.class);
		v2.setInvokableClass(AbstractInvokable.class);
		v3.setInvokableClass(AbstractInvokable.class);
		v4.setInvokableClass(AbstractInvokable.class);
		v5.setInvokableClass(AbstractInvokable.class);

		v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		v5.connectNewDataSetAsInput(v4, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		
		SlotSharingGroup jg1 = new SlotSharingGroup();
		v2.setSlotSharingGroup(jg1);
		v3.setSlotSharingGroup(jg1);
		
		SlotSharingGroup jg2 = new SlotSharingGroup();
		v4.setSlotSharingGroup(jg2);
		v5.setSlotSharingGroup(jg2);
		
		List<JobVertex> vertices = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));
		
		ExecutionGraph eg = new ExecutionGraph(
			TestingUtils.defaultExecutor(),
			TestingUtils.defaultExecutor(),
			new JobID(),
			"test job",
			new Configuration(),
			new SerializedValue<>(new ExecutionConfig()),
			AkkaUtils.getDefaultTimeout(),
			new NoRestartStrategy(),
			new TestingSlotProvider(ignored -> new CompletableFuture<>()));
		eg.attachJobGraph(vertices);
		
		// verify that the vertices are all in the same slot sharing group
		SlotSharingGroup group1 = null;
		SlotSharingGroup group2 = null;
		
		// verify that v1 tasks have no slot sharing group
		assertNull(eg.getJobVertex(v1.getID()).getSlotSharingGroup());
		
		// v2 and v3 are shared
		group1 = eg.getJobVertex(v2.getID()).getSlotSharingGroup();
		assertNotNull(group1);
		assertEquals(group1, eg.getJobVertex(v3.getID()).getSlotSharingGroup());
		
		assertEquals(2, group1.getJobVertexIds().size());
		assertTrue(group1.getJobVertexIds().contains(v2.getID()));
		assertTrue(group1.getJobVertexIds().contains(v3.getID()));
		
		// v4 and v5 are shared
		group2 = eg.getJobVertex(v4.getID()).getSlotSharingGroup();
		assertNotNull(group2);
		assertEquals(group2, eg.getJobVertex(v5.getID()).getSlotSharingGroup());
		
		assertEquals(2, group2.getJobVertexIds().size());
		assertTrue(group2.getJobVertexIds().contains(v4.getID()));
		assertTrue(group2.getJobVertexIds().contains(v5.getID()));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 16
Source File: ArchivedExecutionGraphTest.java    From flink with Apache License 2.0
@BeforeClass
public static void setupExecutionGraph() throws Exception {
	// -------------------------------------------------------------------------------------------------------------
	// Setup
	// -------------------------------------------------------------------------------------------------------------

	JobVertexID v1ID = new JobVertexID();
	JobVertexID v2ID = new JobVertexID();

	JobVertex v1 = new JobVertex("v1", v1ID);
	JobVertex v2 = new JobVertex("v2", v2ID);

	v1.setParallelism(1);
	v2.setParallelism(2);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	JobGraph jobGraph = new JobGraph(v1, v2);
	ExecutionConfig config = new ExecutionConfig();

	config.setExecutionMode(ExecutionMode.BATCH_FORCED);
	config.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
	config.setParallelism(4);
	config.enableObjectReuse();
	config.setGlobalJobParameters(new TestJobParameters());

	jobGraph.setExecutionConfig(config);

	runtimeGraph = TestingExecutionGraphBuilder
		.newBuilder()
		.setJobGraph(jobGraph)
		.build();

	runtimeGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

	List<ExecutionJobVertex> jobVertices = new ArrayList<>();
	jobVertices.add(runtimeGraph.getJobVertex(v1ID));
	jobVertices.add(runtimeGraph.getJobVertex(v2ID));

	CheckpointStatsTracker statsTracker = new CheckpointStatsTracker(
			0,
			jobVertices,
			mock(CheckpointCoordinatorConfiguration.class),
			new UnregisteredMetricsGroup());

	CheckpointCoordinatorConfiguration chkConfig = new CheckpointCoordinatorConfiguration(
		100,
		100,
		100,
		1,
		CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
		true,
		false,
		false,
		0);

	runtimeGraph.enableCheckpointing(
		chkConfig,
		Collections.<ExecutionJobVertex>emptyList(),
		Collections.<ExecutionJobVertex>emptyList(),
		Collections.<ExecutionJobVertex>emptyList(),
		Collections.<MasterTriggerRestoreHook<?>>emptyList(),
		new StandaloneCheckpointIDCounter(),
		new StandaloneCompletedCheckpointStore(1),
		new MemoryStateBackend(),
		statsTracker);

	runtimeGraph.setJsonPlan("{}");

	runtimeGraph.getJobVertex(v2ID).getTaskVertices()[0].getCurrentExecutionAttempt().fail(new RuntimeException("This exception was thrown on purpose."));
}
 
Example 17
Source File: FailoverRegionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that if a task reports that the result of its preceding task has failed,
 * the preceding task is considered failed as well and failover is started.
 * TODO: as the reporting part is not finished yet, this case is ignored temporarily.
 * @throws Exception
 */
@Ignore
@Test
public void testSucceedingNoticePreceding() throws Exception {
	final JobID jobId = new JobID();
	final String jobName = "Test Job Sample Name";

	final SimpleSlotProvider slotProvider = new SimpleSlotProvider(jobId, 14);

	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(1);
	v2.setParallelism(1);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));

	ExecutionGraph eg = new ExecutionGraph(
		new DummyJobInformation(
			jobId,
			jobName),
		TestingUtils.defaultExecutor(),
		TestingUtils.defaultExecutor(),
		AkkaUtils.getDefaultTimeout(),
		new InfiniteDelayRestartStrategy(10),
		new FailoverPipelinedRegionWithDirectExecutor(),
		slotProvider);
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	eg.setScheduleMode(ScheduleMode.EAGER);
	eg.scheduleForExecution();
	RestartPipelinedRegionStrategy strategy = (RestartPipelinedRegionStrategy)eg.getFailoverStrategy();

	ExecutionVertex ev11 = eg.getJobVertex(v1.getID()).getTaskVertices()[0];
	ExecutionVertex ev21 = eg.getJobVertex(v2.getID()).getTaskVertices()[0];
	ev21.getCurrentExecutionAttempt().fail(new Exception("Fail with v1"));

	assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev21).getState());
	assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev11).getState());
}
 
Example 18
Source File: ScheduleOrUpdateConsumersTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests notifications of multiple receivers when a task produces both a pipelined and blocking
 * result.
 *
 * <pre>
 *                             +----------+
 *            +-- pipelined -> | Receiver |
 * +--------+ |                +----------+
 * | Sender |-|
 * +--------+ |                +----------+
 *            +-- blocking --> | Receiver |
 *                             +----------+
 * </pre>
 *
 * <p>The pipelined receiver gets deployed after the first buffer is available and the blocking
 * one after all subtasks are finished.
 */
@Test
public void testMixedPipelinedAndBlockingResults() throws Exception {
	final JobVertex sender = new JobVertex("Sender");
	sender.setInvokableClass(BinaryRoundRobinSubtaskIndexSender.class);
	sender.getConfiguration().setInteger(BinaryRoundRobinSubtaskIndexSender.CONFIG_KEY, PARALLELISM);
	sender.setParallelism(PARALLELISM);

	final JobVertex pipelinedReceiver = new JobVertex("Pipelined Receiver");
	pipelinedReceiver.setInvokableClass(SlotCountExceedingParallelismTest.SubtaskIndexReceiver.class);
	pipelinedReceiver.getConfiguration().setInteger(CONFIG_KEY, PARALLELISM);
	pipelinedReceiver.setParallelism(PARALLELISM);

	pipelinedReceiver.connectNewDataSetAsInput(
			sender,
			DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.PIPELINED);

	final JobVertex blockingReceiver = new JobVertex("Blocking Receiver");
	blockingReceiver.setInvokableClass(SlotCountExceedingParallelismTest.SubtaskIndexReceiver.class);
	blockingReceiver.getConfiguration().setInteger(CONFIG_KEY, PARALLELISM);
	blockingReceiver.setParallelism(PARALLELISM);

	blockingReceiver.connectNewDataSetAsInput(sender,
			DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.BLOCKING);

	SlotSharingGroup slotSharingGroup = new SlotSharingGroup(
			sender.getID(), pipelinedReceiver.getID(), blockingReceiver.getID());

	sender.setSlotSharingGroup(slotSharingGroup);
	pipelinedReceiver.setSlotSharingGroup(slotSharingGroup);
	blockingReceiver.setSlotSharingGroup(slotSharingGroup);

	final JobGraph jobGraph = new JobGraph(
			"Mixed pipelined and blocking result",
			sender,
			pipelinedReceiver,
			blockingReceiver);

	MINI_CLUSTER_RESOURCE.getMiniCluster().executeJobBlocking(jobGraph);
}
 
Example 19
Source File: ZooKeeperSubmittedJobGraphsStoreITCase.java    From flink with Apache License 2.0
private SubmittedJobGraph createSubmittedJobGraph(JobID jobId, String jobName) {
	final JobGraph jobGraph = new JobGraph(jobId, jobName);

	final JobVertex jobVertex = new JobVertex("Test JobVertex");
	jobVertex.setParallelism(1);

	jobGraph.addVertex(jobVertex);

	return new SubmittedJobGraph(jobGraph);
}