Java Code Examples for org.apache.flink.runtime.jobgraph.JobVertex#connectNewDataSetAsInput()

The following examples show how to use org.apache.flink.runtime.jobgraph.JobVertex#connectNewDataSetAsInput(). The snippets are drawn from open source projects; the source file and originating project are noted above each example.
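Before the examples, here is a minimal sketch of the typical call pattern, distilled from the snippets below. The helper method name, vertex names, and parallelism are illustrative; AbstractInvokable stands in for a real task class, as in several of the tests, and the imports mirror those the examples rely on (org.apache.flink.runtime.jobgraph.JobVertex, JobGraph, DistributionPattern; org.apache.flink.runtime.io.network.partition.ResultPartitionType; org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable).

private static JobGraph buildProducerConsumerGraph() {
	// Two vertices: a producer and a consumer of its intermediate result.
	JobVertex producer = new JobVertex("producer");
	producer.setInvokableClass(AbstractInvokable.class);
	producer.setParallelism(4);

	JobVertex consumer = new JobVertex("consumer");
	consumer.setInvokableClass(AbstractInvokable.class);
	consumer.setParallelism(4);

	// connectNewDataSetAsInput() creates a new intermediate data set on the
	// producer and wires the consumer to it. The DistributionPattern chooses
	// the subtask wiring (POINTWISE vs. ALL_TO_ALL); the ResultPartitionType
	// chooses the exchange type (e.g. PIPELINED, PIPELINED_BOUNDED, BLOCKING).
	consumer.connectNewDataSetAsInput(
		producer, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	return new JobGraph("producer-consumer job", producer, consumer);
}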
Example 1
Source File: PartialConsumePipelinedResultTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests a fix for FLINK-1930.
 *
 * <p>When consuming a pipelined result only partially, it is possible that local channels
 * release the buffer pool associated with the result partition too early. If the
 * producer is still producing data when this happens, it runs into an IllegalStateException,
 * because of the destroyed buffer pool.
 *
 * @see <a href="https://issues.apache.org/jira/browse/FLINK-1930">FLINK-1930</a>
 */
@Test
public void testPartialConsumePipelinedResultReceiver() throws Exception {
	final JobVertex sender = new JobVertex("Sender");
	sender.setInvokableClass(SlowBufferSender.class);
	sender.setParallelism(PARALLELISM);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setInvokableClass(SingleBufferReceiver.class);
	receiver.setParallelism(PARALLELISM);

	// The partition needs to be pipelined, otherwise the original issue does not occur, because
	// the sender and receiver are not online at the same time.
	receiver.connectNewDataSetAsInput(
		sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("Partial Consume of Pipelined Result", sender, receiver);

	final SlotSharingGroup slotSharingGroup = new SlotSharingGroup(
		sender.getID(), receiver.getID());

	sender.setSlotSharingGroup(slotSharingGroup);
	receiver.setSlotSharingGroup(slotSharingGroup);

	MINI_CLUSTER_RESOURCE.getMiniCluster().executeJobBlocking(jobGraph);
}
 
Example 2
Source File: ExecutionGraphRescalingTest.java    From Flink-CEPplus with Apache License 2.0
private static JobVertex[] createVerticesForSimpleBipartiteJobGraph(int parallelism, int maxParallelism) {
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");
	JobVertex v3 = new JobVertex("vertex3");
	JobVertex v4 = new JobVertex("vertex4");
	JobVertex v5 = new JobVertex("vertex5");

	JobVertex[] jobVertices = new JobVertex[]{v1, v2, v3, v4, v5};

	for (JobVertex jobVertex : jobVertices) {
		jobVertex.setInvokableClass(AbstractInvokable.class);
		jobVertex.setParallelism(parallelism);
		jobVertex.setMaxParallelism(maxParallelism);
	}

	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	return jobVertices;
}
 
Example 3
Source File: FileBufferReaderITCase.java    From flink with Apache License 2.0
private static JobGraph createJobGraph() {
	final SlotSharingGroup group1 = new SlotSharingGroup();
	final SlotSharingGroup group2 = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(TestSourceInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(group1);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(TestSinkInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(group2);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

	return jobGraph;
}
 
Example 4
Source File: JobRecoveryITCase.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createJobGraph(boolean slotSharingEnabled) throws IOException {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(PARALLELISM);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setParallelism(PARALLELISM);
	receiver.setInvokableClass(FailingOnceReceiver.class);
	FailingOnceReceiver.reset();

	if (slotSharingEnabled) {
		final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
		receiver.setSlotSharingGroup(slotSharingGroup);
		sender.setSlotSharingGroup(slotSharingGroup);
	}

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));

	final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example 5
Source File: ExecutionVertexInputConstraintTest.java    From flink with Apache License 2.0
@Test
public void testInputConstraintALLPerformance() throws Exception {
	final int parallelism = 1000;
	final JobVertex v1 = createVertexWithAllInputConstraints("vertex1", parallelism);
	final JobVertex v2 = createVertexWithAllInputConstraints("vertex2", parallelism);
	final JobVertex v3 = createVertexWithAllInputConstraints("vertex3", parallelism);
	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	v2.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final ExecutionGraph eg = createExecutionGraph(Arrays.asList(v1, v2, v3), InputDependencyConstraint.ALL, 3000);

	eg.start(mainThreadExecutor);
	eg.scheduleForExecution();

	for (int i = 0; i < parallelism - 1; i++) {
		finishSubtask(eg, v1.getID(), i);
	}

	final long startTime = System.nanoTime();
	finishSubtask(eg, v1.getID(), parallelism - 1);

	final Duration duration = Duration.ofNanos(System.nanoTime() - startTime);
	final Duration timeout = Duration.ofSeconds(5);

	assertThat(duration, lessThan(timeout));
}
 
Example 6
Source File: SlotCountExceedingParallelismTest.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createTestJobGraph(
		String jobName,
		int senderParallelism,
		int receiverParallelism) {

	// The sender and receiver invokable logic ensures that each subtask gets the expected data
	final JobVertex sender = new JobVertex("Sender");
	sender.setInvokableClass(RoundRobinSubtaskIndexSender.class);
	sender.getConfiguration().setInteger(RoundRobinSubtaskIndexSender.CONFIG_KEY, receiverParallelism);
	sender.setParallelism(senderParallelism);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setInvokableClass(SubtaskIndexReceiver.class);
	receiver.getConfiguration().setInteger(SubtaskIndexReceiver.CONFIG_KEY, senderParallelism);
	receiver.setParallelism(receiverParallelism);

	receiver.connectNewDataSetAsInput(
			sender,
			DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(jobName, sender, receiver);

	// We need to allow queued scheduling, because there are not enough slots available
	// to run all tasks at once. We queue tasks and then let them finish/consume the blocking
	// result one after the other.
	jobGraph.setAllowQueuedScheduling(true);

	return jobGraph;
}
 
Example 7
Source File: DefaultSchedulingPipelinedRegionTest.java    From flink with Apache License 2.0
/**
 * Tests that the consumed inputs of the pipelined regions are computed
 * correctly using the job graph below.
 * <pre>
 *          c
 *        /  X
 * a -+- b   e
 *       \  /
 *        d
 * </pre>
 * Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
	final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
	final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

	b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
	e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final ExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
	final DefaultExecutionTopology topology = new DefaultExecutionTopology(simpleTestGraph);

	final DefaultSchedulingPipelinedRegion firstPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
	final DefaultSchedulingPipelinedRegion secondPipelinedRegion = topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

	final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
	final IntermediateResultPartitionID b0ConsumedResultPartition = Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

	final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = IterableUtils.toStream(secondPipelinedRegion.getConsumedResults())
		.map(DefaultResultPartition::getId)
		.collect(Collectors.toSet());

	assertThat(firstPipelinedRegion.getConsumedResults().iterator().hasNext(), is(false));
	assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
 
Example 8
Source File: PointwisePatternTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void test2NToN() throws Exception {
	final int N = 17;
	
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(2 * N);
	v2.setParallelism(N);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

	ExecutionGraph eg = getDummyExecutionGraph();
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	
	ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());
	
	for (ExecutionVertex ev : target.getTaskVertices()) {
		assertEquals(1, ev.getNumberOfInputs());
		
		ExecutionEdge[] inEdges = ev.getInputEdges(0);
		assertEquals(2, inEdges.length);
		
		assertEquals(ev.getParallelSubtaskIndex() * 2, inEdges[0].getSource().getPartitionNumber());
		assertEquals(ev.getParallelSubtaskIndex() * 2 + 1, inEdges[1].getSource().getPartitionNumber());
	}
}
 
Example 9
Source File: StreamingJobGraphGenerator.java    From Flink-CEPplus with Apache License 2.0
private void connect(Integer headOfChain, StreamEdge edge) {
	physicalEdgesInOrder.add(edge);

	Integer downStreamVertexId = edge.getTargetId();

	JobVertex headVertex = jobVertices.get(headOfChain);
	JobVertex downStreamVertex = jobVertices.get(downStreamVertexId);

	StreamConfig downStreamConfig = new StreamConfig(downStreamVertex.getConfiguration());

	downStreamConfig.setNumberOfInputs(downStreamConfig.getNumberOfInputs() + 1);

	StreamPartitioner<?> partitioner = edge.getPartitioner();
	JobEdge jobEdge;
	if (partitioner instanceof ForwardPartitioner || partitioner instanceof RescalePartitioner) {
		jobEdge = downStreamVertex.connectNewDataSetAsInput(
			headVertex,
			DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED_BOUNDED);
	} else {
		jobEdge = downStreamVertex.connectNewDataSetAsInput(
			headVertex,
			DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.PIPELINED_BOUNDED);
	}
	// Set the ship strategy name so that the web interface can show it.
	jobEdge.setShipStrategyName(partitioner.toString());

	if (LOG.isDebugEnabled()) {
		LOG.debug("CONNECTED: {} - {} -> {}", partitioner.getClass().getSimpleName(),
			headOfChain, downStreamVertexId);
	}
}
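A note on the mapping this snippet makes: a ForwardPartitioner or RescalePartitioner routes records from each upstream subtask to only one, or a local subset, of downstream subtasks, so the POINTWISE pattern suffices; any other partitioner (keyed, rebalance, broadcast, and so on) may send a record from any producer subtask to any consumer subtask, which is why the ALL_TO_ALL pattern is used there.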
 
Example 10
Source File: PointwisePatternTest.java    From flink with Apache License 2.0
@Test
public void test3NToN() throws Exception {
	final int N = 17;
	
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(3 * N);
	v2.setParallelism(N);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2));

	ExecutionGraph eg = getDummyExecutionGraph();
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	
	ExecutionJobVertex target = eg.getAllVertices().get(v2.getID());
	
	for (ExecutionVertex ev : target.getTaskVertices()) {
		assertEquals(1, ev.getNumberOfInputs());
		
		ExecutionEdge[] inEdges = ev.getInputEdges(0);
		assertEquals(3, inEdges.length);
		
		assertEquals(ev.getParallelSubtaskIndex() * 3, inEdges[0].getSource().getPartitionNumber());
		assertEquals(ev.getParallelSubtaskIndex() * 3 + 1, inEdges[1].getSource().getPartitionNumber());
		assertEquals(ev.getParallelSubtaskIndex() * 3 + 2, inEdges[2].getSource().getPartitionNumber());
	}
}
 
Example 11
Source File: ExecutionGraphConstructionTest.java    From flink with Apache License 2.0
/**
 * Creates a JobGraph of the following form:
 * 
 * <pre>
 *  v1--->v2-->\
 *              \
 *               v4 --->\
 *        ----->/        \
 *  v3-->/                v5
 *       \               /
 *        ------------->/
 * </pre>
 */
@Test
public void testCreateSimpleGraphBipartite() throws Exception {
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");
	JobVertex v3 = new JobVertex("vertex3");
	JobVertex v4 = new JobVertex("vertex4");
	JobVertex v5 = new JobVertex("vertex5");
	
	v1.setParallelism(5);
	v2.setParallelism(7);
	v3.setParallelism(2);
	v4.setParallelism(11);
	v5.setParallelism(4);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);
	v3.setInvokableClass(AbstractInvokable.class);
	v4.setInvokableClass(AbstractInvokable.class);
	v5.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v5.connectNewDataSetAsInput(v4, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	v5.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	
	List<JobVertex> ordered = new ArrayList<JobVertex>(Arrays.asList(v1, v2, v3, v4, v5));

	ExecutionGraph eg = createExecutionGraph();
	try {
		eg.attachJobGraph(ordered);
	}
	catch (JobException e) {
		e.printStackTrace();
		fail("Job failed with exception: " + e.getMessage());
	}
	
	verifyTestGraph(eg, v1, v2, v3, v4, v5);
}
 
Example 12
Source File: PipelinedFailoverRegionBuildingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testDiamondWithMixedPipelinedAndBlockingExchanges() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(8);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(8);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(8);

	final JobVertex vertex4 = new JobVertex("vertex4");
	vertex4.setInvokableClass(NoOpInvokable.class);
	vertex4.setParallelism(8);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	vertex3.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	vertex4.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex4.connectNewDataSetAsInput(vertex3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3, vertex4);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

	Iterator<ExecutionVertex> evs = eg.getAllExecutionVertices().iterator();

	FailoverRegion preRegion = failoverStrategy.getFailoverRegion(evs.next());

	while (evs.hasNext()) {
		FailoverRegion region = failoverStrategy.getFailoverRegion(evs.next());
		assertTrue(preRegion == region);
	}
}
 
Example 13
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
@Test
public void testJobWithAllVerticesFailingDuringInstantiation() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(InstantiationErrorSender.class);
		sender.setParallelism(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception in constructor").isPresent());
		}
	}
}
 
Example 14
Source File: JobMasterTest.java    From flink with Apache License 2.0
private JobGraph producerConsumerJobGraph() {
	final JobVertex producer = new JobVertex("Producer");
	producer.setInvokableClass(NoOpInvokable.class);
	final JobVertex consumer = new JobVertex("Consumer");
	consumer.setInvokableClass(NoOpInvokable.class);

	consumer.connectNewDataSetAsInput(producer, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(producer, consumer);
	jobGraph.setAllowQueuedScheduling(true);

	return jobGraph;
}
 
Example 15
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 *
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(2);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
	FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
	FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

	assertTrue(region1 == region2);
	assertTrue(region2 != region31);
	assertTrue(region32 != region31);
}
 
Example 16
Source File: ExecutionGraphNotEnoughResourceTest.java    From flink with Apache License 2.0
@Test
public void testRestartWithSlotSharingAndNotEnoughResources() throws Exception {
	final int numRestarts = 10;
	final int parallelism = 20;

	SlotPool slotPool = null;
	try {
		slotPool = new TestingSlotPoolImpl(TEST_JOB_ID);
		final Scheduler scheduler = createSchedulerWithSlots(
			parallelism - 1, slotPool, new LocalTaskManagerLocation());

		final SlotSharingGroup sharingGroup = new SlotSharingGroup();

		final JobVertex source = new JobVertex("source");
		source.setInvokableClass(NoOpInvokable.class);
		source.setParallelism(parallelism);
		source.setSlotSharingGroup(sharingGroup);

		final JobVertex sink = new JobVertex("sink");
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(parallelism);
		sink.setSlotSharingGroup(sharingGroup);
		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED_BOUNDED);

		final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Test Job", source, sink);
		jobGraph.setScheduleMode(ScheduleMode.EAGER);

		TestRestartStrategy restartStrategy = new TestRestartStrategy(numRestarts, false);

		final ExecutionGraph eg = TestingExecutionGraphBuilder
			.newBuilder()
			.setJobGraph(jobGraph)
			.setSlotProvider(scheduler)
			.setRestartStrategy(restartStrategy)
			.setAllocationTimeout(Time.milliseconds(1L))
			.build();

		eg.start(mainThreadExecutor);

		mainThreadExecutor.execute(ThrowingRunnable.unchecked(eg::scheduleForExecution));

		CommonTestUtils.waitUntilCondition(
			() -> CompletableFuture.supplyAsync(eg::getState, mainThreadExecutor).join() == JobStatus.FAILED,
			Deadline.fromNow(Duration.ofMillis(2000)));

		// the last suppressed restart is also counted
		assertEquals(numRestarts + 1, CompletableFuture.supplyAsync(eg::getNumberOfRestarts, mainThreadExecutor).join().longValue());

		final Throwable t = CompletableFuture.supplyAsync(eg::getFailureCause, mainThreadExecutor).join();
		if (!(t instanceof NoResourceAvailableException)) {
			ExceptionUtils.rethrowException(t, t.getMessage());
		}
	} finally {
		if (slotPool != null) {
			CompletableFuture.runAsync(slotPool::close, mainThreadExecutor).join();
		}
	}
}
 
Example 17
Source File: ExecutionGraphRestartTest.java    From flink with Apache License 2.0
@Test
public void testRestartWithEagerSchedulingAndSlotSharing() throws Exception {
	// this test is inconclusive if not used with a proper multi-threaded executor
	assertTrue("test assumptions violated", ((ThreadPoolExecutor) executor).getCorePoolSize() > 1);

	final int parallelism = 20;

	try (SlotPool slotPool = createSlotPoolImpl()) {
		final Scheduler scheduler = createSchedulerWithSlots(parallelism, slotPool, new LocalTaskManagerLocation());

		final SlotSharingGroup sharingGroup = new SlotSharingGroup();

		final JobVertex source = new JobVertex("source");
		source.setInvokableClass(NoOpInvokable.class);
		source.setParallelism(parallelism);
		source.setSlotSharingGroup(sharingGroup);

		final JobVertex sink = new JobVertex("sink");
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(parallelism);
		sink.setSlotSharingGroup(sharingGroup);
		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED_BOUNDED);

		TestRestartStrategy restartStrategy = TestRestartStrategy.directExecuting();

		final ExecutionGraph eg = new ExecutionGraphTestUtils.TestingExecutionGraphBuilder(TEST_JOB_ID, source, sink)
			.setSlotProvider(scheduler)
			.setIoExecutor(executor)
			.setFutureExecutor(executor)
			.setRestartStrategy(restartStrategy)
			.setScheduleMode(ScheduleMode.EAGER)
			.build();

		eg.start(mainThreadExecutor);

		eg.scheduleForExecution();

		switchToRunning(eg);

		// fail into 'RESTARTING'
		eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt().fail(
			new Exception("intended test failure"));

		assertEquals(JobStatus.FAILING, eg.getState());

		completeCancellingForAllVertices(eg);

		assertEquals(JobStatus.RUNNING, eg.getState());

		// clean termination
		switchToRunning(eg);
		finishAllVertices(eg);

		assertEquals(JobStatus.FINISHED, eg.getState());
	}
}
 
Example 18
Source File: ExecutionGraphDeploymentTest.java    From flink with Apache License 2.0
/**
 * Tests that a blocking batch job fails if there are not enough resources left to schedule the
 * succeeding tasks. This test case is related to [FLINK-4296] where finished producing tasks
 * swallow the fail exception when scheduling a consumer task.
 */
@Test
public void testNoResourceAvailableFailure() throws Exception {
	final JobID jobId = new JobID();
	JobVertex v1 = new JobVertex("source");
	JobVertex v2 = new JobVertex("sink");

	int dop1 = 1;
	int dop2 = 1;

	v1.setParallelism(dop1);
	v2.setParallelism(dop2);

	v1.setInvokableClass(BatchTask.class);
	v2.setInvokableClass(BatchTask.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

	final ArrayDeque<CompletableFuture<LogicalSlot>> slotFutures = new ArrayDeque<>();
	for (int i = 0; i < dop1; i++) {
		slotFutures.addLast(CompletableFuture.completedFuture(new TestingLogicalSlotBuilder().createTestingLogicalSlot()));
	}

	final SlotProvider slotProvider = new TestingSlotProvider(ignore -> slotFutures.removeFirst());

	DirectScheduledExecutorService directExecutor = new DirectScheduledExecutorService();

	// execution graph that executes actions synchronously
	ExecutionGraph eg = createExecutionGraphWithoutQueuedScheduling(jobId, slotProvider, directExecutor, TestingUtils.defaultExecutor());

	eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

	checkJobOffloaded(eg);

	List<JobVertex> ordered = Arrays.asList(v1, v2);
	eg.attachJobGraph(ordered);

	// schedule, this triggers mock deployment
	eg.scheduleForExecution();

	ExecutionAttemptID attemptID = eg.getJobVertex(v1.getID()).getTaskVertices()[0].getCurrentExecutionAttempt().getAttemptId();
	eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.RUNNING));
	eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.FINISHED, null));

	assertEquals(JobStatus.FAILED, eg.getState());
}
 
Example 19
Source File: JsonGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testGeneratorWithoutAnyAttachments() {
	try {
		JobVertex source1 = new JobVertex("source 1");
		
		JobVertex source2 = new JobVertex("source 2");
		source2.setInvokableClass(DummyInvokable.class);
		
		JobVertex source3 = new JobVertex("source 3");
		
		JobVertex intermediate1 = new JobVertex("intermediate 1");
		JobVertex intermediate2 = new JobVertex("intermediate 2");
		
		JobVertex join1 = new JobVertex("join 1");
		JobVertex join2 = new JobVertex("join 2");

		JobVertex sink1 = new JobVertex("sink 1");
		JobVertex sink2 = new JobVertex("sink 2");
		
		intermediate1.connectNewDataSetAsInput(source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		intermediate2.connectNewDataSetAsInput(source2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
		
		join1.connectNewDataSetAsInput(intermediate1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
		join1.connectNewDataSetAsInput(intermediate2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

		join2.connectNewDataSetAsInput(join1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		join2.connectNewDataSetAsInput(source3, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
		
		sink1.connectNewDataSetAsInput(join2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		sink2.connectNewDataSetAsInput(join1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

		JobGraph jg = new JobGraph("my job", source1, source2, source3,
				intermediate1, intermediate2, join1, join2, sink1, sink2);
		
		String plan = JsonPlanGenerator.generatePlan(jg);
		assertNotNull(plan);

		// validate the produced JSON
		ObjectMapper m = new ObjectMapper();
		JsonNode rootNode = m.readTree(plan);
		
		// core fields
		assertEquals(new TextNode(jg.getJobID().toString()), rootNode.get("jid"));
		assertEquals(new TextNode(jg.getName()), rootNode.get("name"));
		
		assertTrue(rootNode.path("nodes").isArray());
		
		for (Iterator<JsonNode> iter = rootNode.path("nodes").elements(); iter.hasNext(); ) {
			JsonNode next = iter.next();
			
			JsonNode idNode = next.get("id");
			assertNotNull(idNode);
			assertTrue(idNode.isTextual());
			checkVertexExists(idNode.asText(), jg);
			
			String description = next.get("description").asText();
			assertTrue(
					description.startsWith("source") ||
					description.startsWith("sink") ||
					description.startsWith("intermediate") ||
					description.startsWith("join"));
		}
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}