org.apache.flink.runtime.jobgraph.ScheduleMode Java Examples

The following examples show how to use org.apache.flink.runtime.jobgraph.ScheduleMode. Each example is taken from an open source project; the source file and project are noted above it.
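Before the individual examples, here is a minimal, self-contained sketch of how a ScheduleMode is typically set on a JobGraph and later queried by the runtime (assuming the pre-1.12 flink-runtime API used by the examples below; the class ScheduleModeDemo and its trivial vertex are illustrative only, not part of Flink):

import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobgraph.ScheduleMode;

public class ScheduleModeDemo {

	public static void main(String[] args) {
		// Build a trivial job graph to attach a schedule mode to.
		JobVertex vertex = new JobVertex("demo vertex");
		JobGraph jobGraph = new JobGraph("demo job", vertex);

		// EAGER deploys all tasks up front (the default for streaming jobs),
		// while the LAZY_FROM_SOURCES variants deploy downstream tasks only
		// once their inputs are produced (used for batch jobs).
		jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

		// Schedulers later branch on the configured mode, e.g. via
		// allowLazyDeployment(), as several of the examples below show.
		ScheduleMode mode = jobGraph.getScheduleMode();
		System.out.println(mode + " allows lazy deployment: " + mode.allowLazyDeployment());
	}
}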
Example #1
Source File: ExecutionGraphTestUtils.java    From flink with Apache License 2.0
public static Execution getExecution(
		final JobVertexID jid,
		final int subtaskIndex,
		final int numTasks,
		final SlotSharingGroup slotSharingGroup,
		@Nullable final TaskManagerLocation... locations) throws Exception {

	final ExecutionJobVertex ejv = getExecutionJobVertex(
		jid,
		numTasks,
		slotSharingGroup,
		new DirectScheduledExecutorService(),
		ScheduleMode.LAZY_FROM_SOURCES);
	final TestExecutionVertex ev = new TestExecutionVertex(
		ejv,
		subtaskIndex,
		new IntermediateResult[0],
		DEFAULT_TIMEOUT);

	if (locations != null) {
		ev.setPreferredLocationFutures(mapToPreferredLocationFutures(locations));
	}

	return ev.getCurrentExecutionAttempt();
}
 
Example #2
Source File: BatchExecutor.java    From flink with Apache License 2.0
@Override
public StreamGraph generateStreamGraph(List<Transformation<?>> transformations, String jobName) {
	StreamExecutionEnvironment execEnv = getExecutionEnvironment();
	setBatchProperties(execEnv);
	transformations.forEach(execEnv::addOperator);
	StreamGraph streamGraph = execEnv.getStreamGraph(getNonEmptyJobName(jobName));
	// All transformations should set managed memory size.
	ResourceSpec managedResourceSpec = NodeResourceUtil.fromManagedMem(0);
	streamGraph.getStreamNodes().forEach(sn -> {
		if (sn.getMinResources().equals(ResourceSpec.DEFAULT)) {
			sn.setResources(managedResourceSpec, managedResourceSpec);
		}
	});
	streamGraph.setChaining(true);
	streamGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST);
	streamGraph.setStateBackend(null);
	if (streamGraph.getCheckpointConfig().isCheckpointingEnabled()) {
		throw new IllegalArgumentException("Checkpoint is not supported for batch jobs.");
	}
	if (isShuffleModeAllBatch()) {
		streamGraph.setBlockingConnectionsBetweenChains(true);
	}
	return streamGraph;
}
 
Example #3
Source File: SchedulingITCase.java    From flink with Apache License 2.0
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(OneTimeFailingInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(slotSharingGroup);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(NoOpInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	JobGraph jobGraph = new JobGraph(source, sink);

	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example #4
Source File: FileBufferReaderITCase.java    From flink with Apache License 2.0
private static JobGraph createJobGraph() {
	final SlotSharingGroup group1 = new SlotSharingGroup();
	final SlotSharingGroup group2 = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(TestSourceInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(group1);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(TestSinkInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(group2);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

	return jobGraph;
}
 
Example #5
Source File: ShuffleCompressionITCase.java    From flink with Apache License 2.0
private static JobGraph createJobGraph(
		ScheduleMode scheduleMode,
		ResultPartitionType resultPartitionType,
		ExecutionMode executionMode) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	JobVertex source = new JobVertex("source");
	source.setInvokableClass(LongValueSource.class);
	source.setParallelism(PARALLELISM);
	source.setSlotSharingGroup(slotSharingGroup);

	JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(ResultVerifyingSink.class);
	sink.setParallelism(PARALLELISM);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.ALL_TO_ALL, resultPartitionType);
	JobGraph jobGraph = new JobGraph(source, sink);
	jobGraph.setScheduleMode(scheduleMode);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setExecutionMode(executionMode);
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example #6
Source File: DefaultSchedulerFactory.java    From flink with Apache License 2.0
private static ExecutionSlotAllocatorFactory createExecutionSlotAllocatorFactory(
		final ScheduleMode scheduleMode,
		final SlotProvider slotProvider,
		final Time slotRequestTimeout,
		final SchedulingStrategyFactory schedulingStrategyFactory) {

	if (schedulingStrategyFactory instanceof PipelinedRegionSchedulingStrategy.Factory) {
		return new OneSlotPerExecutionSlotAllocatorFactory(
			slotProvider,
			scheduleMode != ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST,
			slotRequestTimeout);
	} else {
		final SlotProviderStrategy slotProviderStrategy = SlotProviderStrategy.from(
			scheduleMode,
			slotProvider,
			slotRequestTimeout);

		return new DefaultExecutionSlotAllocatorFactory(slotProviderStrategy);
	}
}
 
Example #7
Source File: AdaptedRestartPipelinedRegionStrategyNGFailoverTest.java    From flink with Apache License 2.0
/**
 * Creates a job graph as shown below (execution view).
 * It is representative of a batch job.
 * <pre>
 *     (v11) -+-> (v21)
 *            x
 *     (v12) -+-> (v22)
 *
 *            ^
 *            |
 *        (blocking)
 * </pre>
 * Four regions, each consisting of a single vertex.
 */
private JobGraph createBatchJobGraph() {
	final JobVertex v1 = new JobVertex("vertex1");
	final JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(2);
	v2.setParallelism(2);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph(v1, v2);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);

	return jobGraph;
}
 
Example #8
Source File: SlotProviderStrategy.java    From flink with Apache License 2.0
static SlotProviderStrategy from(
	ScheduleMode scheduleMode,
	SlotProvider slotProvider,
	Time allocationTimeout,
	boolean allowQueuedScheduling) {

	switch (scheduleMode) {
		case LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST:
			return new BatchSlotProviderStrategy(slotProvider, allowQueuedScheduling);
		case LAZY_FROM_SOURCES:
		case EAGER:
			return new NormalSlotProviderStrategy(slotProvider, allocationTimeout, allowQueuedScheduling);
		default:
			throw new IllegalArgumentException(String.format("Unknown scheduling mode: %s", scheduleMode));
	}
}
 
Example #9
Source File: SchedulingUtils.java    From flink with Apache License 2.0
public static CompletableFuture<Void> schedule(
		ScheduleMode scheduleMode,
		final Iterable<ExecutionVertex> vertices,
		final ExecutionGraph executionGraph) {

	switch (scheduleMode) {
		case LAZY_FROM_SOURCES:
		case LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST:
			return scheduleLazy(vertices, executionGraph);

		case EAGER:
			return scheduleEager(vertices, executionGraph);

		default:
			throw new IllegalStateException(String.format("Schedule mode %s is invalid.", scheduleMode));
	}
}
 
Example #10
Source File: AdaptedRestartPipelinedRegionStrategyNGFailoverTest.java    From flink with Apache License 2.0
/**
 * Creates a job graph as shown below (execution view).
 * It is representative of a streaming job.
 * <pre>
 *     (v11) -+-> (v21)
 *
 *     (v12) -+-> (v22)
 *
 *            ^
 *            |
 *       (pipelined)
 * </pre>
 * Two regions, each with two pipelined-connected vertices.
 */
private JobGraph createStreamingJobGraph() {
	final JobVertex v1 = new JobVertex("vertex1");
	final JobVertex v2 = new JobVertex("vertex2");

	v1.setParallelism(2);
	v2.setParallelism(2);

	v1.setInvokableClass(AbstractInvokable.class);
	v2.setInvokableClass(AbstractInvokable.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Testjob", v1, v2);
	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	return jobGraph;
}
 
Example #11
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
private static JobGraph nonParallelSourceSinkJobGraph() {
	final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Testjob");
	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(NoOpInvokable.class);
	jobGraph.addVertex(source);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(NoOpInvokable.class);
	jobGraph.addVertex(sink);

	sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	return jobGraph;
}
 
Example #12
Source File: SchedulingITCase.java    From Flink-CEPplus with Apache License 2.0
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(OneTimeFailingInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(slotSharingGroup);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(NoOpInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	JobGraph jobGraph = new JobGraph(source, sink);

	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example #13
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
private DefaultScheduler createSchedulerAndStartScheduling(final JobGraph jobGraph) {
	final SchedulingStrategyFactory schedulingStrategyFactory =
		jobGraph.getScheduleMode() == ScheduleMode.LAZY_FROM_SOURCES ?
			new LazyFromSourcesSchedulingStrategy.Factory() :
			new EagerSchedulingStrategy.Factory();

	try {
		final DefaultScheduler scheduler = createScheduler(jobGraph, schedulingStrategyFactory);
		startScheduling(scheduler);
		return scheduler;
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
}
 
Example #14
Source File: BlockingPartitionBenchmark.java    From flink-benchmarks with Apache License 2.0
private void executeBenchmark(StreamExecutionEnvironment env) throws Exception {
	DataStreamSource<Long> source = env.addSource(new LongSource(RECORDS_PER_INVOCATION));
	source.addSink(new DiscardingSink<>());

	StreamGraph streamGraph = env.getStreamGraph();
	streamGraph.setChaining(false);
	streamGraph.setGlobalDataExchangeMode(GlobalDataExchangeMode.ALL_EDGES_BLOCKING);
	streamGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST);

	env.execute(streamGraph);
}
 
Example #15
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Tests that the schedule mode is configurable.
 */
@Test
public void testSetScheduleMode() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	StreamGraph streamGraph = new StreamGraphGenerator(Collections.emptyList(),
		env.getConfig(), env.getCheckpointConfig())
		.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES)
		.generate();
	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
	assertEquals(ScheduleMode.LAZY_FROM_SOURCES, jobGraph.getScheduleMode());
}
 
Example #16
Source File: ExecutionGraphRestartTest.java    From flink with Apache License 2.0
/**
 * SlotPool#failAllocation should not fail with a {@link java.util.ConcurrentModificationException}
 * if there is a concurrent scheduling operation. See FLINK-13421.
 */
@Test
public void slotPoolExecutionGraph_ConcurrentSchedulingAndAllocationFailure_ShouldNotFailWithConcurrentModificationException() throws Exception {
	final SlotSharingGroup group = new SlotSharingGroup();
	final JobVertex vertex1 = createNoOpVertex("vertex1", 1);
	vertex1.setSlotSharingGroup(group);
	final JobVertex vertex2 = createNoOpVertex("vertex2", 3);
	vertex2.setSlotSharingGroup(group);
	final JobVertex vertex3 = createNoOpVertex("vertex3", 1);
	vertex3.setSlotSharingGroup(group);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	try (SlotPool slotPool = createSlotPoolImpl()) {
		final SlotProvider slots = createSchedulerWithSlots(slotPool, new LocalTaskManagerLocation(), 2);

		final AllocationID allocationId = slotPool.getAvailableSlotsInformation().iterator().next().getAllocationId();

		final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Test Job", vertex1, vertex2, vertex3);
		jobGraph.setScheduleMode(ScheduleMode.EAGER);
		final ExecutionGraph eg = TestingExecutionGraphBuilder
			.newBuilder()
			.setJobGraph(jobGraph)
			.setSlotProvider(slots)
			.setAllocationTimeout(Time.minutes(60))
			.build();

		startAndScheduleExecutionGraph(eg);

		slotPool.failAllocation(
			allocationId,
			new Exception("test exception"));

		eg.waitUntilTerminal();
	}
}
 
Example #17
Source File: ExecutionVertexDeploymentTest.java    From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode: ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = ExecutionGraphTestUtils.getExecutionJobVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID, scheduleMode.allowLazyDeployment()).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example #18
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
private void setupAndRunHandleJobsWhenNotEnoughSlots(ScheduleMode scheduleMode) throws Exception {
	final JobVertex vertex = new JobVertex("Test Vertex");
	vertex.setParallelism(2);
	vertex.setMaxParallelism(2);
	vertex.setInvokableClass(BlockingNoOpInvokable.class);

	final JobGraph jobGraph = new JobGraph("Test Job", vertex);
	jobGraph.setScheduleMode(scheduleMode);

	runHandleJobsWhenNotEnoughSlots(jobGraph);
}
 
Example #19
Source File: MiniClusterITCase.java    From flink with Apache License 2.0
private static JobGraph getSimpleJob(int parallelism) throws IOException {
	final JobVertex task = new JobVertex("Test task");
	task.setParallelism(parallelism);
	task.setMaxParallelism(parallelism);
	task.setInvokableClass(NoOpInvokable.class);

	final JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
	jg.setScheduleMode(ScheduleMode.EAGER);

	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
	jg.setExecutionConfig(executionConfig);

	return jg;
}
 
Example #20
Source File: SchedulerTestingUtils.java    From flink with Apache License 2.0
public static DefaultExecutionSlotAllocatorFactory createDefaultExecutionSlotAllocatorFactory(
		final ScheduleMode scheduleMode,
		final SlotProvider slotProvider,
		final Time slotRequestTimeout) {

	final SlotProviderStrategy slotProviderStrategy = SlotProviderStrategy.from(
		scheduleMode,
		slotProvider,
		slotRequestTimeout);

	return new DefaultExecutionSlotAllocatorFactory(slotProviderStrategy);
}
 
Example #21
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void scheduleWithLazyStrategy() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES);
	final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);

	createSchedulerAndStartScheduling(jobGraph);

	final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();

	final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
	assertThat(deployedExecutionVertices, contains(executionVertexId));
}
 
Example #22
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
private static JobGraph singleJobVertexJobGraph(final int parallelism) {
	final JobGraph jobGraph = new JobGraph(TEST_JOB_ID, "Testjob");
	jobGraph.setScheduleMode(ScheduleMode.EAGER);
	final JobVertex vertex = new JobVertex("source");
	vertex.setInvokableClass(NoOpInvokable.class);
	vertex.setParallelism(parallelism);
	jobGraph.addVertex(vertex);
	return jobGraph;
}
 
Example #23
Source File: ExecutionGraphTestUtils.java    From flink with Apache License 2.0
public static ExecutionJobVertex getExecutionJobVertex(
		JobVertexID id,
		ScheduledExecutorService executor,
		ScheduleMode scheduleMode) throws Exception {

	return getExecutionJobVertex(id, 1, null, executor, scheduleMode);
}
 
Example #24
Source File: ExecutionGraphSchedulingTest.java    From flink with Apache License 2.0
/**
 * Tests that an ongoing scheduling operation does not fail the {@link ExecutionGraph}
 * if it gets concurrently cancelled.
 */
@Test
public void testSchedulingOperationCancellationWhenCancel() throws Exception {
	final JobVertex jobVertex = new JobVertex("NoOp JobVertex");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	jobVertex.setParallelism(2);
	final JobGraph jobGraph = new JobGraph(jobVertex);
	jobGraph.setScheduleMode(ScheduleMode.EAGER);
	jobGraph.setAllowQueuedScheduling(true);

	final CompletableFuture<LogicalSlot> slotFuture1 = new CompletableFuture<>();
	final CompletableFuture<LogicalSlot> slotFuture2 = new CompletableFuture<>();
	final ProgrammedSlotProvider slotProvider = new ProgrammedSlotProvider(2);
	slotProvider.addSlots(jobVertex.getID(), new CompletableFuture[]{slotFuture1, slotFuture2});
	final ExecutionGraph executionGraph = createExecutionGraph(jobGraph, slotProvider);

	executionGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
	executionGraph.scheduleForExecution();

	final TestingLogicalSlot slot = createTestingSlot();
	final CompletableFuture<?> releaseFuture = slot.getReleaseFuture();
	slotFuture1.complete(slot);

	// cancel should change the state of all executions to CANCELLED
	executionGraph.cancel();

	// complete the now CANCELLED execution --> this should cause a failure
	slotFuture2.complete(new TestingLogicalSlotBuilder().createTestingLogicalSlot());

	Thread.sleep(1L);
	// release the first slot to finish the cancellation
	releaseFuture.complete(null);

	// NOTE: This test will only occasionally fail without the fix since there is
	// a race between the releaseFuture and the slotFuture2
	assertThat(executionGraph.getTerminationFuture().get(), is(JobStatus.CANCELED));
}
 
Example #25
Source File: DefaultExecutionSlotAllocatorTest.java    From flink with Apache License 2.0
private DefaultExecutionSlotAllocator createExecutionSlotAllocator(
		final StateLocationRetriever stateLocationRetriever,
		final InputsLocationsRetriever inputsLocationsRetriever) {
	return new DefaultExecutionSlotAllocator(
		SlotProviderStrategy.from(
			ScheduleMode.EAGER,
			slotProvider,
			Time.seconds(10)),
		new DefaultPreferredLocationsRetriever(stateLocationRetriever, inputsLocationsRetriever));
}
 
Example #26
Source File: StreamingJobGraphGeneratorTest.java    From flink with Apache License 2.0
/**
 * Tests the default schedule mode.
 */
@Test
public void testDefaultScheduleMode() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	// use eager schedule mode by default
	StreamGraph streamGraph = new StreamGraphGenerator(Collections.emptyList(),
		env.getConfig(), env.getCheckpointConfig())
		.generate();
	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
	assertEquals(ScheduleMode.EAGER, jobGraph.getScheduleMode());
}
 
Example #27
Source File: ExecutionGraphRestartTest.java    From flink with Apache License 2.0
/**
 * SlotPool#failAllocation should not fail with a {@link java.util.ConcurrentModificationException}
 * if there is a concurrent scheduling operation. See FLINK-13421.
 */
@Test
public void slotPoolExecutionGraph_ConcurrentSchedulingAndAllocationFailure_ShouldNotFailWithConcurrentModificationException() throws Exception {
	final SlotSharingGroup group = new SlotSharingGroup();
	final JobVertex vertex1 = createNoOpVertex("vertex1", 1);
	vertex1.setSlotSharingGroup(group);
	final JobVertex vertex2 = createNoOpVertex("vertex2", 3);
	vertex2.setSlotSharingGroup(group);
	final JobVertex vertex3 = createNoOpVertex("vertex3", 1);
	vertex3.setSlotSharingGroup(group);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

	try (SlotPool slotPool = createSlotPoolImpl()) {
		final SlotProvider slots = createSchedulerWithSlots(2, slotPool, new LocalTaskManagerLocation());

		final AllocationID allocationId = slotPool.getAvailableSlotsInformation().iterator().next().getAllocationId();

		final ExecutionGraph eg = new ExecutionGraphTestUtils.TestingExecutionGraphBuilder(TEST_JOB_ID, vertex1, vertex2, vertex3)
			.setSlotProvider(slots)
			.setAllocationTimeout(Time.minutes(60))
			.setScheduleMode(ScheduleMode.EAGER)
			.setAllowQueuedScheduling(true)
			.build();

		eg.start(mainThreadExecutor);

		eg.scheduleForExecution();

		slotPool.failAllocation(
			allocationId,
			new Exception("test exception"));

		eg.waitUntilTerminal();
	}
}
 
Example #28
Source File: ExecutionVertexDeploymentTest.java    From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode: ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = getExecutionVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 