Java Code Examples for org.apache.flink.runtime.io.network.partition.ResultPartitionType

The following examples show how to use org.apache.flink.runtime.io.network.partition.ResultPartitionType. These examples are extracted from open source projects; the originating project and source file are noted above each example.
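
Before diving in, here is a minimal, self-contained sketch of the pattern that recurs throughout these examples: connecting two JobVertex instances with a chosen ResultPartitionType. The class and vertex names are illustrative placeholders, not taken from any of the projects below.

import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.DistributionPattern;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;

public class ResultPartitionTypeUsage {

	public static JobGraph buildTwoVertexGraph() {
		// Producer and consumer vertices; the names are placeholders.
		JobVertex source = new JobVertex("source");
		JobVertex sink = new JobVertex("sink");

		// PIPELINED streams data to the consumer while the producer runs;
		// BLOCKING materializes the full result before the consumer starts.
		sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

		return new JobGraph(source, sink);
	}
}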
Example 1
Source Project: flink   Source File: ShuffleDescriptorTest.java    License: Apache License 2.0
private static ShuffleDescriptor getConsumedPartitionShuffleDescriptor(
		ResultPartitionID id,
		ExecutionState state,
		@Nullable ResultPartitionDeploymentDescriptor producedPartition,
		boolean allowLazyDeployment) {
	ShuffleDescriptor shuffleDescriptor = TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor(
		id,
		ResultPartitionType.PIPELINED,
		true,
		state,
		allowLazyDeployment,
		producedPartition);
	assertThat(shuffleDescriptor, is(notNullValue()));
	assertThat(shuffleDescriptor.getResultPartitionID(), is(id));
	return shuffleDescriptor;
}
 
Example 2
Source Project: flink   Source File: JobTaskVertexTest.java    License: Apache License 2.0
@Test
public void testConnectDirectly() {
	JobVertex source = new JobVertex("source");
	JobVertex target = new JobVertex("target");
	target.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target.isInputVertex());
	assertTrue(target.isOutputVertex());

	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(1, target.getNumberOfInputs());

	assertEquals(target.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));

	assertEquals(1, source.getProducedDataSets().get(0).getConsumers().size());
	assertEquals(target, source.getProducedDataSets().get(0).getConsumers().get(0).getTarget());
}
 
Example 3
Source Project: flink   Source File: FileBufferReaderITCase.java    License: Apache License 2.0
private static JobGraph createJobGraph() {
	final SlotSharingGroup group1 = new SlotSharingGroup();
	final SlotSharingGroup group2 = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(TestSourceInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(group1);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(TestSinkInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(group2);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

	return jobGraph;
}
 
Example 4
Source Project: flink   Source File: ExecutionPartitionLifecycleTest.java    License: Apache License 2.0
private void testPartitionReleaseOnStateTransitionsAfterRunning(Consumer<Execution> stateTransition1, Consumer<Execution> stateTransition2) throws Exception {
	final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
	final CompletableFuture<Tuple2<JobID, Collection<ResultPartitionID>>> releasePartitionsCallFuture = new CompletableFuture<>();
	taskManagerGateway.setReleasePartitionsConsumer((jobID, partitionIds) -> releasePartitionsCallFuture.complete(Tuple2.of(jobID, partitionIds)));

	final TestingShuffleMaster testingShuffleMaster = new TestingShuffleMaster();

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.PIPELINED, NoOpJobMasterPartitionTracker.INSTANCE, taskManagerGateway, testingShuffleMaster);

	stateTransition1.accept(execution);
	assertFalse(releasePartitionsCallFuture.isDone());

	stateTransition2.accept(execution);
	assertTrue(releasePartitionsCallFuture.isDone());

	final Tuple2<JobID, Collection<ResultPartitionID>> releasePartitionsCall = releasePartitionsCallFuture.get();
	assertEquals(jobId, releasePartitionsCall.f0);
	assertThat(releasePartitionsCall.f1, contains(descriptor.getShuffleDescriptor().getResultPartitionID()));

	assertEquals(1, testingShuffleMaster.externallyReleasedPartitions.size());
	assertEquals(descriptor.getShuffleDescriptor(), testingShuffleMaster.externallyReleasedPartitions.poll());
}
 
Example 5
Source Project: flink   Source File: JobTaskVertexTest.java    License: Apache License 2.0
@Test
public void testConnectMultipleTargets() {
	JobVertex source = new JobVertex("source");
	JobVertex target1 = new JobVertex("target1");
	JobVertex target2 = new JobVertex("target2");
	target1.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	target2.connectDataSetAsInput(source.getProducedDataSets().get(0), DistributionPattern.ALL_TO_ALL);

	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target1.isInputVertex());
	assertTrue(target1.isOutputVertex());
	assertFalse(target2.isInputVertex());
	assertTrue(target2.isOutputVertex());

	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(2, source.getProducedDataSets().get(0).getConsumers().size());

	assertEquals(target1.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
	assertEquals(target2.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
}
 
Example 6
Source Project: flink   Source File: TaskExecutorSubmissionTest.java    License: Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		shuffleDescriptor.getResultPartitionID().getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example 7
Source Project: flink   Source File: RestartAllFailoverStrategyTest.java    License: Apache License 2.0
@Test
public void testGetTasksNeedingRestart() {
	final TestingSchedulingTopology topology = new TestingSchedulingTopology();

	final TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
	final TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
	final TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();

	topology.connect(v1, v2, ResultPartitionType.PIPELINED);
	topology.connect(v2, v3, ResultPartitionType.BLOCKING);

	final RestartAllFailoverStrategy strategy = new RestartAllFailoverStrategy(topology);

	assertEquals(
		new HashSet<>(Arrays.asList(v1.getId(), v2.getId(), v3.getId())),
		strategy.getTasksNeedingRestart(v1.getId(), new Exception("Test failure")));
}
 
Example 8
Source Project: flink   Source File: ShuffleCompressionITCase.java    License: Apache License 2.0
private static JobGraph createJobGraph(
		ScheduleMode scheduleMode,
		ResultPartitionType resultPartitionType,
		ExecutionMode executionMode) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	JobVertex source = new JobVertex("source");
	source.setInvokableClass(LongValueSource.class);
	source.setParallelism(PARALLELISM);
	source.setSlotSharingGroup(slotSharingGroup);

	JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(ResultVerifyingSink.class);
	sink.setParallelism(PARALLELISM);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, resultPartitionType);
	JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(scheduleMode);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setExecutionMode(executionMode);
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example 9
Source Project: Flink-CEPplus   Source File: JobExecutionITCase.java    License: Apache License 2.0
private JobGraph createJobGraph(int parallelism) {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(parallelism);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setParallelism(parallelism);
	receiver.setInvokableClass(TestingAbstractInvokables.Receiver.class);

	// In order to make testCoLocationConstraintJobExecution fail, one needs to
	// remove the co-location constraint and the slot sharing groups, because then
	// the receivers will have to wait for the senders to finish and the slot
	// assignment order to the receivers is non-deterministic (depending on the
	// order in which the senders finish).
	final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
	receiver.setSlotSharingGroup(slotSharingGroup);
	sender.setSlotSharingGroup(slotSharingGroup);
	receiver.setStrictlyCoLocatedWith(sender);

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);

	return jobGraph;
}
 
Example 10
Source Project: Flink-CEPplus   Source File: TaskExecutorITCase.java    License: Apache License 2.0
private JobGraph createJobGraph(int parallelism) {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(parallelism);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Blocking receiver");
	receiver.setParallelism(parallelism);
	receiver.setInvokableClass(BlockingOperator.class);
	BlockingOperator.reset();

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
	sender.setSlotSharingGroup(slotSharingGroup);
	receiver.setSlotSharingGroup(slotSharingGroup);

	return new JobGraph("Blocking test job with slot sharing", sender, receiver);
}
 
Example 11
Source Project: Flink-CEPplus   Source File: JobTaskVertexTest.java    License: Apache License 2.0
@Test
public void testConnectMultipleTargets() {
	JobVertex source = new JobVertex("source");
	JobVertex target1 = new JobVertex("target1");
	JobVertex target2 = new JobVertex("target2");
	target1.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	target2.connectDataSetAsInput(source.getProducedDataSets().get(0), DistributionPattern.ALL_TO_ALL);
	
	assertTrue(source.isInputVertex());
	assertFalse(source.isOutputVertex());
	assertFalse(target1.isInputVertex());
	assertTrue(target1.isOutputVertex());
	assertFalse(target2.isInputVertex());
	assertTrue(target2.isOutputVertex());
	
	assertEquals(1, source.getNumberOfProducedIntermediateDataSets());
	assertEquals(2, source.getProducedDataSets().get(0).getConsumers().size());
	
	assertEquals(target1.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
	assertEquals(target2.getInputs().get(0).getSource(), source.getProducedDataSets().get(0));
}
 
Example 12
Source Project: flink   Source File: IntermediateResult.java    License: Apache License 2.0
public IntermediateResult(
		IntermediateDataSetID id,
		ExecutionJobVertex producer,
		int numParallelProducers,
		ResultPartitionType resultType) {

	this.id = checkNotNull(id);
	this.producer = checkNotNull(producer);

	checkArgument(numParallelProducers >= 1);
	this.numParallelProducers = numParallelProducers;

	this.partitions = new IntermediateResultPartition[numParallelProducers];

	this.numberOfRunningProducers = new AtomicInteger(numParallelProducers);

	// we do not set the intermediate result partitions here, because we let them be initialized by
	// the execution vertex that produces them

	// assign a random connection index
	this.connectionIndex = (int) (Math.random() * Integer.MAX_VALUE);

	// The runtime type for this produced result
	this.resultType = checkNotNull(resultType);
}
 
Example 13
Source Project: flink   Source File: PipelinedRegionComputeUtilTest.java    License: Apache License 2.0
/**
 * This test checks that strictly co-located vertices are in the same failover region,
 * even though they are not connected.
 * This is currently an assumption / limitation of the scheduler.
 * <pre>
 *     (a1) -+-> (b1)
 *
 *     (a2) -+-> (b2)
 * </pre>
 */
@Test
public void testPipelinedOneToOneTopologyWithCoLocation() {
	TestingSchedulingTopology topology = new TestingSchedulingTopology();

	TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();

	topology
		.connect(va1, vb1, ResultPartitionType.PIPELINED)
		.connect(va2, vb2, ResultPartitionType.PIPELINED);

	topology.setContainsCoLocationConstraints(true);

	Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);

	Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
	Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
	Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
	Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());

	assertSameRegion(ra1, ra2, rb1, rb2);
}
 
Example 14
Source Project: flink   Source File: DefaultSchedulerTest.java    License: Apache License 2.0
@Test
public void testInputConstraintALLPerf() throws Exception {
	final int parallelism = 1000;
	final JobVertex v1 = createVertexWithAllInputConstraints("vertex1", parallelism);
	final JobVertex v2 = createVertexWithAllInputConstraints("vertex2", parallelism);
	final JobVertex v3 = createVertexWithAllInputConstraints("vertex3", parallelism);
	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
	v2.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(v1, v2, v3);
	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
	final AccessExecutionJobVertex ejv1 = scheduler.requestJob().getAllVertices().get(v1.getID());

	for (int i = 0; i < parallelism - 1; i++) {
		finishSubtask(scheduler, ejv1, i);
	}

	final long startTime = System.nanoTime();
	finishSubtask(scheduler, ejv1, parallelism - 1);

	final Duration duration = Duration.ofNanos(System.nanoTime() - startTime);
	final Duration timeout = Duration.ofSeconds(5);

	assertThat(duration, lessThan(timeout));
}
 
Example 15
Source Project: flink   Source File: InputGateTestBase.java    License: Apache License 2.0
protected SingleInputGate createInputGate(
	NettyShuffleEnvironment environment, int numberOfInputChannels, ResultPartitionType partitionType) {

	SingleInputGateBuilder builder = new SingleInputGateBuilder()
		.setNumberOfChannels(numberOfInputChannels)
		.setResultPartitionType(partitionType)
		.setIsCreditBased(enableCreditBasedFlowControl);

	if (environment != null) {
		builder = builder.setupBufferPoolFactory(environment);
	}

	SingleInputGate inputGate = builder.build();
	assertEquals(partitionType, inputGate.getConsumedPartitionType());
	return inputGate;
}
 
Example 16
Source Project: flink   Source File: RegionPartitionReleaseStrategyTest.java    License: Apache License 2.0
@Test
public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
	final List<TestingSchedulingExecutionVertex> sourceVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingExecutionVertex> intermediateVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingExecutionVertex> sinkVertices = testingSchedulingTopology.addExecutionVertices().finish();
	final List<TestingSchedulingResultPartition> sourceResultPartitions = testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
	testingSchedulingTopology.connectAllToAll(intermediateVertices, sinkVertices).withResultPartitionType(ResultPartitionType.PIPELINED).finish();

	final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
	final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
	final IntermediateResultPartitionID onlySourceResultPartitionId = sourceResultPartitions.get(0).getId();

	final RegionPartitionReleaseStrategy regionPartitionReleaseStrategy = new RegionPartitionReleaseStrategy(testingSchedulingTopology);

	regionPartitionReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
	final List<IntermediateResultPartitionID> partitionsToRelease = regionPartitionReleaseStrategy.vertexFinished(onlySinkVertexId);
	assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
}
 
Example 17
Source Project: flink   Source File: ExecutionPartitionLifecycleTest.java    License: Apache License 2.0
private void testPartitionReleaseOnStateTransitionsAfterRunning(Consumer<Execution> stateTransition1, Consumer<Execution> stateTransition2) throws Exception {
	final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
	final CompletableFuture<Tuple2<JobID, Collection<ResultPartitionID>>> releasePartitionsCallFuture = new CompletableFuture<>();
	taskManagerGateway.setReleasePartitionsConsumer((jobID, partitionIds) -> releasePartitionsCallFuture.complete(Tuple2.of(jobID, partitionIds)));

	final TestingShuffleMaster testingShuffleMaster = new TestingShuffleMaster();

	setupExecutionGraphAndStartRunningJob(ResultPartitionType.PIPELINED, NoOpPartitionTracker.INSTANCE, taskManagerGateway, testingShuffleMaster);

	stateTransition1.accept(execution);
	assertFalse(releasePartitionsCallFuture.isDone());

	stateTransition2.accept(execution);
	assertTrue(releasePartitionsCallFuture.isDone());

	final Tuple2<JobID, Collection<ResultPartitionID>> releasePartitionsCall = releasePartitionsCallFuture.get();
	assertEquals(jobId, releasePartitionsCall.f0);
	assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), releasePartitionsCall.f1);

	assertEquals(1, testingShuffleMaster.externallyReleasedPartitions.size());
	assertEquals(descriptor.getShuffleDescriptor(), testingShuffleMaster.externallyReleasedPartitions.poll());
}
 
Example 18
@Test
public void testPipelinedPartitionConsumable() throws Exception {
	IntermediateResult result = createResult(ResultPartitionType.PIPELINED, 2);
	IntermediateResultPartition partition1 = result.getPartitions()[0];
	IntermediateResultPartition partition2 = result.getPartitions()[1];

	// Not consumable on init
	assertFalse(partition1.isConsumable());
	assertFalse(partition2.isConsumable());

	// Partition 1 consumable after data are produced
	partition1.markDataProduced();
	assertTrue(partition1.isConsumable());
	assertFalse(partition2.isConsumable());

	// Not consumable if failover happens
	result.resetForNewExecution();
	assertFalse(partition1.isConsumable());
	assertFalse(partition2.isConsumable());
}
 
Example 19
Source Project: flink   Source File: TestFailoverTopology.java    License: Apache License 2.0
public Builder connect(TestFailoverVertex source, TestFailoverVertex target, ResultPartitionType partitionType, IntermediateResultPartitionID partitionID) {
	FailoverEdge edge = new TestFailoverEdge(partitionID, partitionType, source, target);
	source.addOuputEdge(edge);
	target.addInputEdge(edge);

	return this;
}
 
Example 20
Source Project: flink   Source File: ExecutionVertexLocalityTest.java    License: Apache License 2.0
/**
 * Creates a simple 2 vertex graph with a parallel source and a parallel target.
 */
private ExecutionGraph createTestGraph(int parallelism, boolean allToAll) throws Exception {

	JobVertex source = new JobVertex("source", sourceVertexId);
	source.setParallelism(parallelism);
	source.setInvokableClass(NoOpInvokable.class);

	JobVertex target = new JobVertex("source", targetVertexId);
	target.setParallelism(parallelism);
	target.setInvokableClass(NoOpInvokable.class);

	DistributionPattern connectionPattern = allToAll ? DistributionPattern.ALL_TO_ALL : DistributionPattern.POINTWISE;
	target.connectNewDataSetAsInput(source, connectionPattern, ResultPartitionType.PIPELINED);

	JobGraph testJob = new JobGraph(jobId, "test job", source, target);

	final Time timeout = Time.seconds(10L);
	return ExecutionGraphBuilder.buildGraph(
		null,
		testJob,
		new Configuration(),
		TestingUtils.defaultExecutor(),
		TestingUtils.defaultExecutor(),
		mock(SlotProvider.class),
		getClass().getClassLoader(),
		new StandaloneCheckpointRecoveryFactory(),
		timeout,
		new FixedDelayRestartStrategy(10, 0L),
		new UnregisteredMetricsGroup(),
		VoidBlobWriter.getInstance(),
		timeout,
		log,
		NettyShuffleMaster.INSTANCE,
		NoOpJobMasterPartitionTracker.INSTANCE);
}
 
Example 21
Source Project: Flink-CEPplus   Source File: RemoteInputChannelTest.java    License: Apache License 2.0
private SingleInputGate createSingleInputGate() {
	return new SingleInputGate(
		"InputGate",
		new JobID(),
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		1,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		true);
}
 
Example 22
Source Project: flink   Source File: PipelinedRegionComputeUtilTest.java    License: Apache License 2.0
/**
 * Cascades of joins with partially blocking, partially pipelined exchanges.
 * <pre>
 *     (1)--+
 *          +--(5)-+
 *     (2)--+      |
 *              (blocking)
 *                 |
 *                 +--(7)
 *                 |
 *              (blocking)
 *     (3)--+      |
 *          +--(6)-+
 *     (4)--+
 * </pre>
 *
 * <p>Component 1: 1, 2, 5; component 2: 3, 4, 6; component 3: 7
 */
@Test
public void testMultipleComponentsViaCascadeOfJoins() {
	TestingSchedulingTopology topology = new TestingSchedulingTopology();

	TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
	TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();

	topology
		.connect(v1, v5, ResultPartitionType.PIPELINED)
		.connect(v2, v5, ResultPartitionType.PIPELINED)
		.connect(v3, v6, ResultPartitionType.PIPELINED)
		.connect(v4, v6, ResultPartitionType.PIPELINED)
		.connect(v5, v7, ResultPartitionType.BLOCKING)
		.connect(v6, v7, ResultPartitionType.BLOCKING);

	Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);

	Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
	Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
	Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
	Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
	Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
	Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
	Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());

	assertSameRegion(r1, r2, r5);
	assertSameRegion(r3, r4, r6);

	assertDistinctRegions(r1, r3, r7);
}
 
Example 23
Source Project: flink   Source File: StreamingJobGraphGeneratorTest.java    License: Apache License 2.0
/**
 * Verify that "blockingConnectionsBetweenChains" is off by default.
 */
@Test
public void testBlockingAfterChainingOffDisabled() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Filter -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	// partition transformation with an undefined shuffle mode between source and filter
	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
		sourceDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.UNDEFINED));
	DataStream<Integer> filterDataStream = partitionAfterSourceDataStream.filter(value -> true).setParallelism(2);

	DataStream<Integer> partitionAfterFilterDataStream = new DataStream<>(env, new PartitionTransformation<>(
		filterDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.UNDEFINED));

	partitionAfterFilterDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(2, verticesSorted.size());

	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex filterAndPrintVertex = verticesSorted.get(1);

	assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.PIPELINED_BOUNDED,
			filterAndPrintVertex.getInputs().get(0).getSource().getResultType());
}
 
Example 24
Source Project: flink   Source File: SlotCountExceedingParallelismTest.java    License: Apache License 2.0
private JobGraph createTestJobGraph(
		String jobName,
		int senderParallelism,
		int receiverParallelism) {

	// The sender and receiver invokable logic ensure that each subtask gets the expected data
	final JobVertex sender = new JobVertex("Sender");
	sender.setInvokableClass(RoundRobinSubtaskIndexSender.class);
	sender.getConfiguration().setInteger(RoundRobinSubtaskIndexSender.CONFIG_KEY, receiverParallelism);
	sender.setParallelism(senderParallelism);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setInvokableClass(SubtaskIndexReceiver.class);
	receiver.getConfiguration().setInteger(SubtaskIndexReceiver.CONFIG_KEY, senderParallelism);
	receiver.setParallelism(receiverParallelism);

	receiver.connectNewDataSetAsInput(
			sender,
			DistributionPattern.ALL_TO_ALL,
			ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(jobName, sender, receiver);

	// We need to allow queued scheduling, because there are not enough slots available
	// to run all tasks at once. We queue tasks and then let them finish/consume the blocking
	// result one after the other.
	jobGraph.setAllowQueuedScheduling(true);

	return jobGraph;
}
 
Example 25
Source Project: flink   Source File: StreamingJobGraphGeneratorTest.java    License: Apache License 2.0
/**
 * Test setting shuffle mode to {@link ShuffleMode#BATCH}.
 */
@Test
public void testShuffleModeBatch() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	// fromElements -> Map -> Print
	DataStream<Integer> sourceDataStream = env.fromElements(1, 2, 3);

	DataStream<Integer> partitionAfterSourceDataStream = new DataStream<>(env, new PartitionTransformation<>(
			sourceDataStream.getTransformation(), new ForwardPartitioner<>(), ShuffleMode.BATCH));
	DataStream<Integer> mapDataStream = partitionAfterSourceDataStream.map(value -> value).setParallelism(1);

	DataStream<Integer> partitionAfterMapDataStream = new DataStream<>(env, new PartitionTransformation<>(
			mapDataStream.getTransformation(), new RescalePartitioner<>(), ShuffleMode.BATCH));
	partitionAfterMapDataStream.print().setParallelism(2);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertEquals(3, verticesSorted.size());

	// it cannot be chained with BATCH shuffle mode
	JobVertex sourceVertex = verticesSorted.get(0);
	JobVertex mapVertex = verticesSorted.get(1);

	// BATCH shuffle mode is translated into BLOCKING result partition
	assertEquals(ResultPartitionType.BLOCKING,
		sourceVertex.getProducedDataSets().get(0).getResultType());
	assertEquals(ResultPartitionType.BLOCKING,
		mapVertex.getProducedDataSets().get(0).getResultType());
}
 
Example 26
Source Project: flink   Source File: JobVertex.java    License: Apache License 2.0
public IntermediateDataSet createAndAddResultDataSet(
		IntermediateDataSetID id,
		ResultPartitionType partitionType) {

	IntermediateDataSet result = new IntermediateDataSet(id, partitionType, this);
	this.results.add(result);
	return result;
}
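
As a usage note, the sketch below shows how a caller might invoke this method; it is an illustrative fragment assuming a plain JobVertex as the receiver, with a freshly generated IntermediateDataSetID and an arbitrarily chosen partition type.

	JobVertex producer = new JobVertex("producer");
	// Attach a new blocking intermediate result to the vertex.
	IntermediateDataSet dataSet = producer.createAndAddResultDataSet(
			new IntermediateDataSetID(), ResultPartitionType.BLOCKING);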
 
Example 27
Source Project: Flink-CEPplus   Source File: MiniClusterITCase.java    License: Apache License 2.0
@Test
public void testJobWithAnOccasionallyFailingSenderVertex() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(SometimesExceptionSender.class);
		sender.setParallelism(parallelism);

		// set failing senders
		SometimesExceptionSender.configFailingSenders(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception").isPresent());
		}
	}
}
 
Example 28
Source Project: flink   Source File: JobGraphTest.java    License: Apache License 2.0
@Test
public void testTopologicalSort1() {
	try {
		JobVertex source1 = new JobVertex("source1");
		JobVertex source2 = new JobVertex("source2");
		JobVertex target1 = new JobVertex("target1");
		JobVertex target2 = new JobVertex("target2");
		JobVertex intermediate1 = new JobVertex("intermediate1");
		JobVertex intermediate2 = new JobVertex("intermediate2");
		
		target1.connectNewDataSetAsInput(source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		target2.connectNewDataSetAsInput(source1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		target2.connectNewDataSetAsInput(intermediate2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		intermediate2.connectNewDataSetAsInput(intermediate1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
		intermediate1.connectNewDataSetAsInput(source2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

		JobGraph graph = new JobGraph("TestGraph",
			source1, source2, intermediate1, intermediate2, target1, target2);
		List<JobVertex> sorted = graph.getVerticesSortedTopologicallyFromSources();
		
		assertEquals(6, sorted.size());
		
		assertBefore(source1, target1, sorted);
		assertBefore(source1, target2, sorted);
		assertBefore(source2, target2, sorted);
		assertBefore(source2, intermediate1, sorted);
		assertBefore(source2, intermediate2, sorted);
		assertBefore(intermediate1, target2, sorted);
		assertBefore(intermediate2, target2, sorted);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 29
Source Project: Flink-CEPplus   Source File: MiniClusterITCase.java    License: Apache License 2.0
@Test
public void testJobWithAllVerticesFailingDuringInstantiation() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(InstantiationErrorSender.class);
		sender.setParallelism(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception in constructor").isPresent());
		}
	}
}
 
Example 30
Source Project: Flink-CEPplus   Source File: MiniClusterITCase.java    License: Apache License 2.0
@Test
public void testJobWithSomeVerticesFailingDuringInstantiation() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(SometimesInstantiationErrorSender.class);
		sender.setParallelism(parallelism);

		// set failing senders
		SometimesInstantiationErrorSender.configFailingSenders(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception in constructor").isPresent());
		}
	}
}