org.apache.flink.runtime.io.network.partition.ResultPartitionType Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.partition.ResultPartitionType. They are drawn from open-source projects; the source file and originating project are noted above each example.
Example #1
Source File: ShuffleCompressionITCase.java    From flink with Apache License 2.0
private static JobGraph createJobGraph(
		ScheduleMode scheduleMode,
		ResultPartitionType resultPartitionType,
		ExecutionMode executionMode) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	JobVertex source = new JobVertex("source");
	source.setInvokableClass(LongValueSource.class);
	source.setParallelism(PARALLELISM);
	source.setSlotSharingGroup(slotSharingGroup);

	JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(ResultVerifyingSink.class);
	sink.setParallelism(PARALLELISM);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, resultPartitionType);
	JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(scheduleMode);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setExecutionMode(executionMode);
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example #2
Source File: ShuffleDescriptorTest.java    From flink with Apache License 2.0
private static ShuffleDescriptor getConsumedPartitionShuffleDescriptor(
		ResultPartitionID id,
		ExecutionState state,
		@Nullable ResultPartitionDeploymentDescriptor producedPartition,
		boolean allowLazyDeployment) {
	ShuffleDescriptor shuffleDescriptor = TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor(
		id,
		ResultPartitionType.PIPELINED,
		true,
		state,
		allowLazyDeployment,
		producedPartition);
	assertThat(shuffleDescriptor, is(notNullValue()));
	assertThat(shuffleDescriptor.getResultPartitionID(), is(id));
	return shuffleDescriptor;
}
 
Example #3
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void testInputConstraintALLPerf() throws Exception {
	final int parallelism = 1000;
	final JobVertex v1 = createVertexWithAllInputConstraints("vertex1", parallelism);
	final JobVertex v2 = createVertexWithAllInputConstraints("vertex2", parallelism);
	final JobVertex v3 = createVertexWithAllInputConstraints("vertex3", parallelism);
	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	v2.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(v1, v2, v3);
	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
	final AccessExecutionJobVertex ejv1 = scheduler.requestJob().getAllVertices().get(v1.getID());

	for (int i = 0; i < parallelism - 1; i++) {
		finishSubtask(scheduler, ejv1, i);
	}

	final long startTime = System.nanoTime();
	finishSubtask(scheduler, ejv1, parallelism - 1);

	final Duration duration = Duration.ofNanos(System.nanoTime() - startTime);
	final Duration timeout = Duration.ofSeconds(5);

	assertThat(duration, lessThan(timeout));
}
 
Example #4
Source File: IntermediateResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testPipelinedPartitionConsumable() throws Exception {
	IntermediateResult result = createResult(ResultPartitionType.PIPELINED, 2);
	IntermediateResultPartition partition1 = result.getPartitions()[0];
	IntermediateResultPartition partition2 = result.getPartitions()[1];

	// Not consumable on init
	assertFalse(partition1.isConsumable());
	assertFalse(partition2.isConsumable());

	// Partition 1 consumable after data are produced
	partition1.markDataProduced();
	assertTrue(partition1.isConsumable());
	assertFalse(partition2.isConsumable());

	// Not consumable if failover happens
	result.resetForNewExecution();
	assertFalse(partition1.isConsumable());
	assertFalse(partition2.isConsumable());
}
 
Example #5
Source File: JobTaskVertexTest.java    From flink with Apache License 2.0
@Test
public void testConnectDirectly() {
	JobVertex source = new JobVertex("source");
	JobVertex target = new JobVertex("target");
	target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target.isInputVertex());
	assertTrue(target.isOutputVertex());

	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(1, target.getNumberOfInputs());

	assertEquals(target.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));

	assertEquals(1, source.getProducedDataSets().get(0).getConsumers().size());
	assertEquals(target, source.getProducedDataSets().get(0).getConsumers().get(0).getTarget());
}
 
Example #6
Source File: InputGateTestBase.java    From flink with Apache License 2.0
protected SingleInputGate createInputGate(
	NettyShuffleEnvironment environment, int numberOfInputChannels, ResultPartitionType partitionType) {

	SingleInputGateBuilder builder = new SingleInputGateBuilder()
		.setNumberOfChannels(numberOfInputChannels)
		.setResultPartitionType(partitionType)
		.setIsCreditBased(enableCreditBasedFlowControl);

	if (environment != null) {
		builder = builder.setupBufferPoolFactory(environment);
	}

	SingleInputGate inputGate = builder.build();
	assertEquals(partitionType, inputGate.getConsumedPartitionType());
	return inputGate;
}
 
Example #7
Source File: FileBufferReaderITCase.java    From flink with Apache License 2.0
private static JobGraph createJobGraph() {
	final SlotSharingGroup group1 = new SlotSharingGroup();
	final SlotSharingGroup group2 = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(TestSourceInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(group1);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(TestSinkInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(group2);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

	return jobGraph;
}
 
Example #8
Source File: ExecutionPartitionLifecycleTest.java    From flink with Apache License 2.0
private void testPartitionReleaseOnStateTransitionsAfterRunning(Consumer<Execution> stateTransition1, Consumer<Execution> stateTransition2) throws Exception {
	final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
	final CompletableFuture<Tuple2<JobID, Collection<ResultPartitionID>>> releasePartitionsCallFuture = new CompletableFuture<>();
	taskManagerGateway.setReleasePartitionsConsumer(((jobID, partitionIds) -> releasePartitionsCallFuture.complete(Tuple2.of(jobID, partitionIds))));

	final TestingShuffleMaster testingShuffleMaster = new TestingShuffleMaster();

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.PIPELINED, NoOpJobMasterPartitionTracker.INSTANCE, taskManagerGateway, testingShuffleMaster);

	stateTransition1.accept(execution);
	assertFalse(releasePartitionsCallFuture.isDone());

	stateTransition2.accept(execution);
	assertTrue(releasePartitionsCallFuture.isDone());

	final Tuple2<JobID, Collection<ResultPartitionID>> releasePartitionsCall = releasePartitionsCallFuture.get();
	assertEquals(jobId, releasePartitionsCall.f0);
	assertThat(releasePartitionsCall.f1, contains(descriptor.getShuffleDescriptor().getResultPartitionID()));

	assertEquals(1, testingShuffleMaster.externallyReleasedPartitions.size());
	assertEquals(descriptor.getShuffleDescriptor(), testingShuffleMaster.externallyReleasedPartitions.poll());
}
 
Example #9
Source File: JobTaskVertexTest.java    From flink with Apache License 2.0
@Test
public void testConnectMultipleTargets() {
	JobVertex source = new JobVertex("source");
	JobVertex target1 = new JobVertex("target1");
	JobVertex target2 = new JobVertex("target2");
	target1.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	target2.connectDataSetAsInput(source.getProducedDataSets().get(0), DistributionPattern.ALL_TO_ALL);

	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target1.isInputVertex());
	assertTrue(target1.isOutputVertex());
	assertFalse(target2.isInputVertex());
	assertTrue(target2.isOutputVertex());

	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(2, source.getProducedDataSets().get(0).getConsumers().size());

	assertEquals(target1.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
	assertEquals(target2.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
}
 
Example #10
Source File: IntermediateResult.java    From flink with Apache License 2.0
public IntermediateResult(
		IntermediateDataSetID id,
		ExecutionJobVertex producer,
		int numParallelProducers,
		ResultPartitionType resultType) {

	this.id = checkNotNull(id);
	this.producer = checkNotNull(producer);

	checkArgument(numParallelProducers >= 1);
	this.numParallelProducers = numParallelProducers;

	this.partitions = new IntermediateResultPartition[numParallelProducers];

	this.numberOfRunningProducers = new AtomicInteger(numParallelProducers);

	// we do not set the intermediate result partitions here, because we let them be initialized by
	// the execution vertex that produces them

	// assign a random connection index
	this.connectionIndex = (int) (Math.random() * Integer.MAX_VALUE);

	// The runtime type for this produced result
	this.resultType = checkNotNull(resultType);
}
 
Example #11
Source File: PipelinedRegionComputeUtilTest.java    From flink with Apache License 2.0
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are not connected.
 * This is currently an assumption / limitation of the scheduler.
 * <pre>
 *     (a1) -+-> (b1)
 *
 *     (a2) -+-> (b2)
 * </pre>
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() {
	TestingSchedulingTopology topology = new TestingSchedulingTopology();

	TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();

	topology
		.connect(va1, vb1, ResultPartitionType.PIPELINED)
		.connect(va2, vb2, ResultPartitionType.PIPELINED);

	topology.setContainsCoLocationConstraints(true);

	Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);

	Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
	Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
	Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
	Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());

	assertSameRegion(ra1, ra2, rb1, rb2);
}
 
Example #12
Source File: JobExecutionITCase.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createJobGraph(int parallelism) {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(parallelism);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setParallelism(parallelism);
	receiver.setInvokableClass(TestingAbstractInvokables.Receiver.class);

	// In order to make testCoLocationConstraintJobExecution fail, one needs to
	// remove the co-location constraint and the slot sharing groups, because then
	// the receivers will have to wait for the senders to finish and the slot
	// assignment order to the receivers is non-deterministic (depending on the
	// order in which the senders finish).
	final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
	receiver.setSlotSharingGroup(slotSharingGroup);
	sender.setSlotSharingGroup(slotSharingGroup);
	receiver.setStrictlyCoLocatedWith(sender);

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);

	return jobGraph;
}
 
Example #13
Source File: TaskExecutorITCase.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createJobGraph(int parallelism) {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(parallelism);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Blocking receiver");
	receiver.setParallelism(parallelism);
	receiver.setInvokableClass(BlockingOperator.class);
	BlockingOperator.reset();

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
	sender.setSlotSharingGroup(slotSharingGroup);
	receiver.setSlotSharingGroup(slotSharingGroup);

	return new JobGraph("Blocking test job with slot sharing", sender, receiver);
}
 
Example #14
Source File: JobTaskVertexTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConnectMultipleTargets() {
	JobVertex source = new JobVertex("source");
	JobVertex target1 = new JobVertex("target1");
	JobVertex target2 = new JobVertex("target2");
	target1.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	target2.connectDataSetAsInput(source.getProducedDataSets().get(0), DistributionPattern.ALL_TO_ALL);
	
	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target1.isInputVertex());
	assertTrue(target1.isOutputVertex());
	assertFalse(target2.isInputVertex());
	assertTrue(target2.isOutputVertex());
	
	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(2, source.getProducedDataSets().get(0).getConsumers().size());
	
	assertEquals(target1.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
	assertEquals(target2.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
}
 
Example #15
Source File: RestartAllFailoverStrategyTest.java    From flink with Apache License 2.0
@Test
public void testGetTasksNeedingRestart() {
	final TestingSchedulingTopology topology = new TestingSchedulingTopology();

	final TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
	final TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
	final TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();

	topology.connect(v1, v2, ResultPartitionType.PIPELINED);
	topology.connect(v2, v3, ResultPartitionType.BLOCKING);

	final RestartAllFailoverStrategy strategy = new RestartAllFailoverStrategy(topology);

	assertEquals(
		new HashSet<>(Arrays.asList(v1.getId(), v2.getId(), v3.getId())),
		strategy.getTasksNeedingRestart(v1.getId(), new Exception("Test failure")));
}
 
Example #16
Source File: RegionPartitionReleaseStrategyTest.java    From flink with Apache License 2.0
@Test
public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
	final List<TestingSchedulingExecutionVertex> sourceVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingExecutionVertex> intermediateVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingExecutionVertex> sinkVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingResultPartition> sourceResultPartitions = testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
	testingSchedulingTopology.connectAllToAll(intermediateVertices, sinkVertices).withResultPartitionType(ResultPartitionType.PIPELINED).finish();

	final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
	final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
	final IntermediateResultPartitionID onlySourceResultPartitionId = sourceResultPartitions.get(0).getId();

	final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology);

	regionPartitionReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
	final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(onlySinkVertexId);
	assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
}
 
Example #17
Source File: ExecutionPartitionLifecycleTest.java    From flink with Apache License 2.0
private void testPartitionReleaseOnStateTransitionsAfterRunning(Consumer<Execution> stateTransition1, Consumer<Execution> stateTransition2) throws Exception {
	final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
	final CompletableFuture<Tuple2<JobID, Collection<ResultPartitionID>>> releasePartitionsCallFuture = new CompletableFuture<>();
	taskManagerGateway.setReleasePartitionsConsumer(((jobID, partitionIds) -> releasePartitionsCallFuture.complete(Tuple2.of(jobID, partitionIds))));

	final TestingShuffleMaster testingShuffleMaster = new TestingShuffleMaster();

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.PIPELINED, NoOpPartitionTracker.INSTANCE, taskManagerGateway, testingShuffleMaster);

	stateTransition1.accept(execution);
	assertFalse(releasePartitionsCallFuture.isDone());

	stateTransition2.accept(execution);
	assertTrue(releasePartitionsCallFuture.isDone());

	final Tuple2<JobID, Collection<ResultPartitionID>> releasePartitionsCall = releasePartitionsCallFuture.get();
	assertEquals(jobId, releasePartitionsCall.f0);
	assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), releasePartitionsCall.f1);

	assertEquals(1, testingShuffleMaster.externallyReleasedPartitions.size());
	assertEquals(descriptor.getShuffleDescriptor(), testingShuffleMaster.externallyReleasedPartitions.poll());
}
 
Example #18
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		shuffleDescriptor.getResultPartitionID().getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example #19
Source File: JobVertex.java    From flink with Apache License 2.0
public IntermediateDataSet createAndAddResultDataSet(
		IntermediateDataSetID id,
		ResultPartitionType partitionType) {

	IntermediateDataSet result = new IntermediateDataSet(id, partitionType, this);
	this.results.add(result);
	return result;
}
 
Example #20
Source File: RestartPipelinedRegionStrategyBuildingTest.java    From flink with Apache License 2.0
/**
 * Tests the topology shown below.
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1)
 *           X
 *     (a2) -+-> (b2) -+-> (c2)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange() throws Exception {
	TestFailoverTopology.Builder topologyBuilder = new TestFailoverTopology.Builder();

	TestFailoverTopology.TestFailoverVertex va1 = topologyBuilder.newVertex();
	TestFailoverTopology.TestFailoverVertex va2 = topologyBuilder.newVertex();
	TestFailoverTopology.TestFailoverVertex vb1 = topologyBuilder.newVertex();
	TestFailoverTopology.TestFailoverVertex vb2 = topologyBuilder.newVertex();
	TestFailoverTopology.TestFailoverVertex vc1 = topologyBuilder.newVertex();
	TestFailoverTopology.TestFailoverVertex vc2 = topologyBuilder.newVertex();

	topologyBuilder
		.connect(va1, vb1, ResultPartitionType.PIPELINED)
		.connect(va1, vb2, ResultPartitionType.PIPELINED)
		.connect(va2, vb1, ResultPartitionType.PIPELINED)
		.connect(va2, vb2, ResultPartitionType.PIPELINED)
		.connect(vb1, vc1, ResultPartitionType.BLOCKING)
		.connect(vb2, vc2, ResultPartitionType.BLOCKING);

	FailoverTopology topology = topologyBuilder.build();

	RestartPipelinedRegionStrategy strategy = new RestartPipelinedRegionStrategy(topology);

	FailoverRegion ra1 = strategy.getFailoverRegion(va1.getExecutionVertexID());
	FailoverRegion ra2 = strategy.getFailoverRegion(va2.getExecutionVertexID());
	FailoverRegion rb1 = strategy.getFailoverRegion(vb1.getExecutionVertexID());
	FailoverRegion rb2 = strategy.getFailoverRegion(vb2.getExecutionVertexID());
	FailoverRegion rc1 = strategy.getFailoverRegion(vc1.getExecutionVertexID());
	FailoverRegion rc2 = strategy.getFailoverRegion(vc2.getExecutionVertexID());

	assertSameRegion(ra1, ra2, rb1, rb2);

	assertDistinctRegions(ra1, rc1, rc2);
}
 
Example #21
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
@Test
public void testJobWithAFailingSenderVertex() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(ExceptionSender.class);
		sender.setParallelism(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception").isPresent());
		}
	}
}
 
Example #22
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Test setting shuffle mode to {@link ShuffleMode#BATCH}.
 */
@Test
public void testShuffleModeBatch() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Map -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
			sourceDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.BATCH));
	DataStream<Integer> mapDataStream = partitionAfterSourceDataStream.map(value -> value).setParallelism(1);

	DataStream<Integer> partitionAfterMapDataStream = new DataStream<>(env, new PartitionTransformation<>(
			mapDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.BATCH));
	partitionAfterMapDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(3, verticesSorted.size());

	// it cannot be chained with BATCH shuffle mode
	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex mapVertex = verticesSorted.get(1);

	// BATCH shuffle mode is translated into BLOCKING result partition
	assertEquals(ResultPartitionType.BLOCKING,
		sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.BLOCKING,
		mapVertex.getProducedDataSets().get(0).getResultType());
}
 
Example #23
Source File: JobGraphGenerator.java    From flink with Apache License 2.0
private boolean checkAndConfigurePersistentIntermediateResult(PlanNode node) {
	if (!(node instanceof SinkPlanNode)) {
		return false;
	}

	final Object userCodeObject = node.getProgramOperator().getUserCodeWrapper().getUserCodeObject();
	if (!(userCodeObject instanceof BlockingShuffleOutputFormat)) {
		return false;
	}

	final Iterator<Channel> inputIterator = node.getInputs().iterator();
	checkState(inputIterator.hasNext(), "SinkPlanNode must have an input.");

	final PlanNode predecessorNode = inputIterator.next().getSource();
	final JobVertex predecessorVertex = (vertices.containsKey(predecessorNode)) ?
		vertices.get(predecessorNode) :
		chainedTasks.get(predecessorNode).getContainingVertex();

	checkState(predecessorVertex != null, "Bug: Chained task has not been assigned its containing vertex when connecting.");

	predecessorVertex.createAndAddResultDataSet(
			// use specified intermediateDataSetID
			new IntermediateDataSetID(((BlockingShuffleOutputFormat) userCodeObject).getIntermediateDataSetId()),
			ResultPartitionType.BLOCKING_PERSISTENT);

	// remove this node so the OutputFormatVertex will not be shown in the final JobGraph.
	vertices.remove(node);
	return true;
}
 
Example #24
Source File: PipelinedRegionSchedulingStrategy.java    From flink with Apache License 2.0
private void init() {
	for (SchedulingPipelinedRegion region : schedulingTopology.getAllPipelinedRegions()) {
		for (SchedulingResultPartition partition : region.getConsumedResults()) {
			checkState(partition.getResultType() == ResultPartitionType.BLOCKING);

			partitionConsumerRegions.computeIfAbsent(partition.getId(), pid -> new HashSet<>()).add(region);
			correlatedResultPartitions.computeIfAbsent(partition.getResultId(), rid -> new HashSet<>()).add(partition);
		}
	}
}
 
Example #25
Source File: TaskTest.java    From flink with Apache License 2.0
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
	final ShuffleDescriptor dummyChannel = NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
	final InputGateDeploymentDescriptor dummyGate = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] { dummyChannel });
	testExecutionFailsInNetworkRegistration(Collections.emptyList(), Collections.singletonList(dummyGate));
}
 
Example #26
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
@Test
public void testCallFinalizeOnMasterBeforeJobCompletes() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex source = new JobVertex("Source");
		source.setInvokableClass(WaitingNoOpInvokable.class);
		source.setParallelism(parallelism);

		final WaitOnFinalizeJobVertex sink = new WaitOnFinalizeJobVertex("Sink", 20L);
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(parallelism);

		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("SubtaskInFinalStateRaceCondition", source, sink);

		final CompletableFuture<JobSubmissionResult> submissionFuture = miniCluster.submitJob(jobGraph);

		final CompletableFuture<JobResult> jobResultFuture = submissionFuture.thenCompose(
			(JobSubmissionResult ignored) -> miniCluster.requestJobResult(jobGraph.getJobID()));

		jobResultFuture.get().toJobExecutionResult(getClass().getClassLoader());

		assertTrue(sink.finalizedOnMaster.get());
	}
}
 
Example #27
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are connected via a blocking pattern.
 * This is currently an assumption / limitation of the scheduler.
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() throws Exception {
	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(NoOpInvokable.class);
	source.setParallelism(10);

	final JobVertex target = new JobVertex("target");
	target.setInvokableClass(NoOpInvokable.class);
	target.setParallelism(10);

	target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final SlotSharingGroup sharingGroup = new SlotSharingGroup();
	source.setSlotSharingGroup(sharingGroup);
	target.setSlotSharingGroup(sharingGroup);

	source.setStrictlyCoLocatedWith(target);

	final JobGraph jobGraph = new JobGraph("test job", source, target);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion sourceRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[0]);
	FailoverRegion sourceRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(source.getID()).getTaskVertices()[1]);
	FailoverRegion targetRegion1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[0]);
	FailoverRegion targetRegion2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(target.getID()).getTaskVertices()[1]);

	// we use 'assertTrue' here rather than 'assertEquals' because we want to test
	// for referential equality, to be on the safe side
	assertTrue(sourceRegion1 == sourceRegion2);
	assertTrue(sourceRegion2 == targetRegion1);
	assertTrue(targetRegion1 == targetRegion2);
}
 
Example #28
Source File: JobGraphTest.java    From flink with Apache License 2.0
@Test
public void testTopologicalSort1() {
	try {
		JobVertex source1 = new JobVertex("source1");
		JobVertex source2 = new JobVertex("source2");
		JobVertex target1 = new JobVertex("target1");
		JobVertex target2 = new JobVertex("target2");
		JobVertex intermediate1 = new JobVertex("intermediate1");
		JobVertex intermediate2 = new JobVertex("intermediate2");
		
		target1.connectNewDataSetAsInput(source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		target2.connectNewDataSetAsInput(source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		target2.connectNewDataSetAsInput(intermediate2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		intermediate2.connectNewDataSetAsInput(intermediate1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		intermediate1.connectNewDataSetAsInput(source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

		JobGraph graph = new JobGraph("TestGraph",
			source1, source2, intermediate1, intermediate2, target1, target2);
		List<JobVertex> sorted = graph.getVerticesSortedTopologicallyFromSources();
		
		assertEquals(6, sorted.size());
		
		assertBefore(source1, target1, sorted);
		assertBefore(source1, target2, sorted);
		assertBefore(source2, target2, sorted);
		assertBefore(source2, intermediate1, sorted);
		assertBefore(source2, intermediate2, sorted);
		assertBefore(intermediate1, target2, sorted);
		assertBefore(intermediate2, target2, sorted);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #29
Source File: ExecutionVertexInputConstraintTest.java    From Flink-CEPplus with Apache License 2.0
private static List<JobVertex> createOrderedVertices() {
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");
	JobVertex v3 = new JobVertex("vertex3");
	v1.setParallelism(2);
	v2.setParallelism(2);
	v3.setParallelism(2);
	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);
	v3.setInvokableClass(AbstractInvokable.class);
	v3.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	return Arrays.asList(v1, v2, v3);
}
 
Example #30
Source File: ExecutionVertexInputConstraintTest.java    From flink with Apache License 2.0
private static List<JobVertex> createOrderedVertices() {
	JobVertex v1 = new JobVertex("vertex1");
	JobVertex v2 = new JobVertex("vertex2");
	JobVertex v3 = new JobVertex("vertex3");
	v1.setParallelism(2);
	v2.setParallelism(2);
	v3.setParallelism(2);
	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);
	v3.setInvokableClass(AbstractInvokable.class);
	v3.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	return Arrays.asList(v1, v2, v3);
}