org.apache.flink.runtime.deployment.TaskDeploymentDescriptor Java Examples

The following examples show how to use org.apache.flink.runtime.deployment.TaskDeploymentDescriptor. Each example is taken from an open-source project; the source file and license are noted in its header.
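Most of the examples below share the same submission flow: build a TaskDeploymentDescriptor (typically through a test helper), allocate a task slot for its allocation ID, and submit it to the TaskExecutor through its gateway. The sketch below condenses that flow; it is illustrative only and assumes the TaskExecutorSubmissionTest helpers shown in the later examples (createTestTaskDeploymentDescriptor, jobId, timeout) are in scope.

// Minimal sketch of the submit flow used by the TaskExecutorSubmissionTest examples below.
// Assumes createTestTaskDeploymentDescriptor, jobId and timeout from that test class are in scope.
final ExecutionAttemptID eid = new ExecutionAttemptID();
final TaskDeploymentDescriptor tdd =
	createTestTaskDeploymentDescriptor("example task", eid, NoOpInvokable.class);

try (TaskSubmissionTestEnvironment env =
	new TaskSubmissionTestEnvironment.Builder(jobId)
		.setSlotSize(1)
		.build()) {
	TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
	TaskSlotTable taskSlotTable = env.getTaskSlotTable();

	// a slot must be allocated for the descriptor's AllocationID before submitTask will succeed
	taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
	tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
}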
Example #1
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks,
	List<ResultPartitionDeploymentDescriptor> producedPartitions,
	List<InputGateDeploymentDescriptor> inputGates
) throws IOException {
	Preconditions.checkNotNull(producedPartitions);
	Preconditions.checkNotNull(inputGates);
	return createTaskDeploymentDescriptor(
		jobId, testName.getMethodName(), eid,
		new SerializedValue<>(new ExecutionConfig()), taskName, maxNumberOfSubtasks, 0, 1, 0,
		new Configuration(), new Configuration(), abstractInvokable.getName(),
		producedPartitions,
		inputGates,
		Collections.emptyList(),
		Collections.emptyList(),
		0);
}
 
Example #2
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = new PartitionDescriptor(
		new IntermediateDataSetID(),
		shuffleDescriptor.getResultPartitionID().getPartitionId(),
		ResultPartitionType.PIPELINED,
		1,
		0);
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example #3
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that the TaskManager sends a proper exception back to the sender if the submit task
 * message fails.
 */
@Test(timeout = 10000L)
public void testSubmitTaskFailure() throws Exception {
	final ExecutionAttemptID eid = new ExecutionAttemptID();

	final TaskDeploymentDescriptor tdd = createTestTaskDeploymentDescriptor(
		"test task",
		eid,
		BlockingNoOpInvokable.class,
		0); // this will make the submission fail because the number of key groups must be >= 1

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
	} catch (Exception e) {
		assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
	}
}
 
Example #4
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createSender(
		NettyShuffleDescriptor shuffleDescriptor,
		Class<? extends AbstractInvokable> abstractInvokable) throws IOException {
	PartitionDescriptor partitionDescriptor = PartitionDescriptorBuilder
		.newBuilder()
		.setPartitionId(shuffleDescriptor.getResultPartitionID().getPartitionId())
		.build();
	ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(
		partitionDescriptor,
		shuffleDescriptor,
		1,
		true);
	return createTestTaskDeploymentDescriptor(
		"Sender",
		shuffleDescriptor.getResultPartitionID().getProducerId(),
		abstractInvokable,
		1,
		Collections.singletonList(resultPartitionDeploymentDescriptor),
		Collections.emptyList());
}
 
Example #5
Source File: TestingTaskExecutorGateway.java    From flink with Apache License 2.0
TestingTaskExecutorGateway(
		String address,
		String hostname,
		BiConsumer<ResourceID, AllocatedSlotReport> heartbeatJobManagerConsumer,
		BiConsumer<JobID, Throwable> disconnectJobManagerConsumer,
		BiFunction<TaskDeploymentDescriptor, JobMasterId, CompletableFuture<Acknowledge>> submitTaskConsumer,
		Function<Tuple5<SlotID, JobID, AllocationID, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction,
		BiFunction<AllocationID, Throwable, CompletableFuture<Acknowledge>> freeSlotFunction,
		Consumer<ResourceID> heartbeatResourceManagerConsumer,
		Consumer<Exception> disconnectResourceManagerConsumer,
		Function<ExecutionAttemptID, CompletableFuture<Acknowledge>> cancelTaskFunction,
		Supplier<CompletableFuture<Boolean>> canBeReleasedSupplier,
		BiConsumer<JobID, Collection<ResultPartitionID>> releasePartitionsConsumer) {
	this.address = Preconditions.checkNotNull(address);
	this.hostname = Preconditions.checkNotNull(hostname);
	this.heartbeatJobManagerConsumer = Preconditions.checkNotNull(heartbeatJobManagerConsumer);
	this.disconnectJobManagerConsumer = Preconditions.checkNotNull(disconnectJobManagerConsumer);
	this.submitTaskConsumer = Preconditions.checkNotNull(submitTaskConsumer);
	this.requestSlotFunction = Preconditions.checkNotNull(requestSlotFunction);
	this.freeSlotFunction = Preconditions.checkNotNull(freeSlotFunction);
	this.heartbeatResourceManagerConsumer = heartbeatResourceManagerConsumer;
	this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
	this.cancelTaskFunction = cancelTaskFunction;
	this.canBeReleasedSupplier = canBeReleasedSupplier;
	this.releasePartitionsConsumer = releasePartitionsConsumer;
}
 
Example #6
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that we can submit a task to the TaskManager given that we've allocated a slot there.
 */
@Test(timeout = 10000L)
public void testTaskSubmission() throws Exception {
	final ExecutionAttemptID eid = new ExecutionAttemptID();

	final TaskDeploymentDescriptor tdd = createTestTaskDeploymentDescriptor("test task", eid, TaskExecutorTest.TestInvokable.class);

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setSlotSize(1)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();

		taskRunningFuture.get();
	}
}
 
Example #7
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks,
	Collection<ResultPartitionDeploymentDescriptor> producedPartitions,
	Collection<InputGateDeploymentDescriptor> inputGates
) throws IOException {
	Preconditions.checkNotNull(producedPartitions);
	Preconditions.checkNotNull(inputGates);
	return createTaskDeploymentDescriptor(
		jobId, testName.getMethodName(), eid,
		new SerializedValue<>(new ExecutionConfig()), taskName, maxNumberOfSubtasks, 0, 1, 0,
		new Configuration(), new Configuration(), abstractInvokable.getName(),
		producedPartitions,
		inputGates,
		Collections.emptyList(),
		Collections.emptyList(),
		0);
}
 
Example #8
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that the TaskManager sends a proper exception back to the sender if the submit task
 * message fails.
 */
@Test(timeout = TEST_TIMEOUT)
public void testSubmitTaskFailure() throws Exception {
	final ExecutionAttemptID eid = new ExecutionAttemptID();

	final TaskDeploymentDescriptor tdd = createTestTaskDeploymentDescriptor(
		"test task",
		eid,
		BlockingNoOpInvokable.class,
		0); // this will make the submission fail because the number of key groups must be >= 1

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
	} catch (Exception e) {
		assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
	}
}
 
Example #9
Source File: JobMasterPartitionReleaseTest.java    From flink with Apache License 2.0
private void testPartitionReleaseOrPromotionOnJobTermination(Function<TestSetup, CompletableFuture<ResourceID>> taskExecutorCallSelector, ExecutionState finalExecutionState) throws Exception {
	final CompletableFuture<TaskDeploymentDescriptor> taskDeploymentDescriptorFuture = new CompletableFuture<>();
	final TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
		.setSubmitTaskConsumer((tdd, ignored) -> {
			taskDeploymentDescriptorFuture.complete(tdd);
			return CompletableFuture.completedFuture(Acknowledge.get());
		})
		.createTestingTaskExecutorGateway();

	try (final TestSetup testSetup = new TestSetup(rpcService, testingFatalErrorHandler, testingTaskExecutorGateway)) {
		final JobMasterGateway jobMasterGateway = testSetup.getJobMasterGateway();

		// update the execution state of the only execution to target state
		// this should trigger the job to finish
		final TaskDeploymentDescriptor taskDeploymentDescriptor = taskDeploymentDescriptorFuture.get();
		jobMasterGateway.updateTaskExecutionState(
			new TaskExecutionState(
				taskDeploymentDescriptor.getJobId(),
				taskDeploymentDescriptor.getExecutionAttemptId(),
				finalExecutionState));

		assertThat(taskExecutorCallSelector.apply(testSetup).get(), equalTo(testSetup.getTaskExecutorResourceID()));
	}
}
 
Example #10
Source File: TestingTaskExecutorGateway.java    From Flink-CEPplus with Apache License 2.0
TestingTaskExecutorGateway(
		String address,
		String hostname,
		BiConsumer<ResourceID, AllocatedSlotReport> heartbeatJobManagerConsumer,
		BiConsumer<JobID, Throwable> disconnectJobManagerConsumer,
		BiFunction<TaskDeploymentDescriptor, JobMasterId, CompletableFuture<Acknowledge>> submitTaskConsumer,
		Function<Tuple5<SlotID, JobID, AllocationID, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction,
		BiFunction<AllocationID, Throwable, CompletableFuture<Acknowledge>> freeSlotFunction,
		Consumer<ResourceID> heartbeatResourceManagerConsumer,
		Consumer<Exception> disconnectResourceManagerConsumer,
		Function<ExecutionAttemptID, CompletableFuture<Acknowledge>> cancelTaskFunction,
		Supplier<Boolean> canBeReleasedSupplier) {
	this.address = Preconditions.checkNotNull(address);
	this.hostname = Preconditions.checkNotNull(hostname);
	this.heartbeatJobManagerConsumer = Preconditions.checkNotNull(heartbeatJobManagerConsumer);
	this.disconnectJobManagerConsumer = Preconditions.checkNotNull(disconnectJobManagerConsumer);
	this.submitTaskConsumer = Preconditions.checkNotNull(submitTaskConsumer);
	this.requestSlotFunction = Preconditions.checkNotNull(requestSlotFunction);
	this.freeSlotFunction = Preconditions.checkNotNull(freeSlotFunction);
	this.heartbeatResourceManagerConsumer = heartbeatResourceManagerConsumer;
	this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
	this.cancelTaskFunction = cancelTaskFunction;
	this.canBeReleasedSupplier = canBeReleasedSupplier;
}
 
Example #11
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that we can submit a task to the TaskManager given that we've allocated a slot there.
 */
@Test(timeout = TEST_TIMEOUT)
public void testTaskSubmission() throws Exception {
	final ExecutionAttemptID eid = new ExecutionAttemptID();

	final TaskDeploymentDescriptor tdd = createTestTaskDeploymentDescriptor("test task", eid, FutureCompletingInvokable.class);

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setSlotSize(1)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();

		taskRunningFuture.get();
	}
}
 
Example #12
Source File: TestingTaskExecutorGateway.java    From flink with Apache License 2.0
TestingTaskExecutorGateway(
		String address,
		String hostname,
		BiConsumer<ResourceID, AllocatedSlotReport> heartbeatJobManagerConsumer,
		BiConsumer<JobID, Throwable> disconnectJobManagerConsumer,
		BiFunction<TaskDeploymentDescriptor, JobMasterId, CompletableFuture<Acknowledge>> submitTaskConsumer,
		Function<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction,
		BiFunction<AllocationID, Throwable, CompletableFuture<Acknowledge>> freeSlotFunction,
		Consumer<ResourceID> heartbeatResourceManagerConsumer,
		Consumer<Exception> disconnectResourceManagerConsumer,
		Function<ExecutionAttemptID, CompletableFuture<Acknowledge>> cancelTaskFunction,
		Supplier<CompletableFuture<Boolean>> canBeReleasedSupplier,
		TriConsumer<JobID, Set<ResultPartitionID>, Set<ResultPartitionID>> releaseOrPromotePartitionsConsumer,
		Consumer<Collection<IntermediateDataSetID>> releaseClusterPartitionsConsumer,
		TriFunction<ExecutionAttemptID, OperatorID, SerializedValue<OperatorEvent>, CompletableFuture<Acknowledge>> operatorEventHandler,
		Supplier<CompletableFuture<ThreadDumpInfo>> requestThreadDumpSupplier) {

	this.address = Preconditions.checkNotNull(address);
	this.hostname = Preconditions.checkNotNull(hostname);
	this.heartbeatJobManagerConsumer = Preconditions.checkNotNull(heartbeatJobManagerConsumer);
	this.disconnectJobManagerConsumer = Preconditions.checkNotNull(disconnectJobManagerConsumer);
	this.submitTaskConsumer = Preconditions.checkNotNull(submitTaskConsumer);
	this.requestSlotFunction = Preconditions.checkNotNull(requestSlotFunction);
	this.freeSlotFunction = Preconditions.checkNotNull(freeSlotFunction);
	this.heartbeatResourceManagerConsumer = heartbeatResourceManagerConsumer;
	this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
	this.cancelTaskFunction = cancelTaskFunction;
	this.canBeReleasedSupplier = canBeReleasedSupplier;
	this.releaseOrPromotePartitionsConsumer = releaseOrPromotePartitionsConsumer;
	this.releaseClusterPartitionsConsumer = releaseClusterPartitionsConsumer;
	this.operatorEventHandler = operatorEventHandler;
	this.requestThreadDumpSupplier = requestThreadDumpSupplier;
}
 
Example #13
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createReceiver(NettyShuffleDescriptor shuffleDescriptor) throws IOException {
	InputGateDeploymentDescriptor inputGateDeploymentDescriptor = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] {shuffleDescriptor});
	return createTestTaskDeploymentDescriptor(
		"Receiver",
		new ExecutionAttemptID(),
		TestingAbstractInvokables.Receiver.class,
		1,
		Collections.emptyList(),
		Collections.singletonList(inputGateDeploymentDescriptor));
}
 
Example #14
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks
) throws IOException {
	return createTestTaskDeploymentDescriptor(taskName,
		eid,
		abstractInvokable,
		maxNumberOfSubtasks,
		Collections.emptyList(),
		Collections.emptyList());
}
 
Example #15
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createReceiver(NettyShuffleDescriptor shuffleDescriptor) throws IOException {
	InputGateDeploymentDescriptor inputGateDeploymentDescriptor = new InputGateDeploymentDescriptor(
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		new ShuffleDescriptor[] {shuffleDescriptor});
	return createTestTaskDeploymentDescriptor(
		"Receiver",
		new ExecutionAttemptID(),
		TestingAbstractInvokables.Receiver.class,
		1,
		Collections.emptyList(),
		Collections.singletonList(inputGateDeploymentDescriptor));
}
 
Example #16
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 *  Tests that repeated local {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test(timeout = TEST_TIMEOUT)
public void testLocalPartitionNotFound() throws Exception {
	ResourceID producerLocation = ResourceID.generate();
	NettyShuffleDescriptor shuffleDescriptor =
		createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), producerLocation);
	TaskDeploymentDescriptor tdd = createReceiver(shuffleDescriptor);
	ExecutionAttemptID eid = tdd.getExecutionAttemptId();

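	// keep the partition request backoff short so the repeated PartitionNotFoundException fails the task quickly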
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
	final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setResourceID(producerLocation)
			.setSlotSize(1)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.addTaskManagerActionListener(eid, ExecutionState.FAILED, taskFailedFuture)
			.setConfiguration(config)
			.useRealNonMockShuffleEnvironment()
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable<Task> taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
		taskRunningFuture.get();

		taskFailedFuture.get();

		assertSame(taskSlotTable.getTask(eid).getExecutionState(), ExecutionState.FAILED);
		assertThat(taskSlotTable.getTask(eid).getFailureCause(), instanceOf(PartitionNotFoundException.class));
	}
}
 
Example #17
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test(timeout = 10000L)
public void testRemotePartitionNotFound() throws Exception {
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);

	// Remote location (on the same TM though) for the partition
	NettyShuffleDescriptor sdd =
		NettyShuffleDescriptorBuilder.newBuilder().setDataPort(dataPort).buildRemote();
	TaskDeploymentDescriptor tdd = createReceiver(sdd);
	ExecutionAttemptID eid = tdd.getExecutionAttemptId();

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
	final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setSlotSize(2)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.addTaskManagerActionListener(eid, ExecutionState.FAILED, taskFailedFuture)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.useRealNonMockShuffleEnvironment()
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
		taskRunningFuture.get();

		taskFailedFuture.get();
		assertThat(taskSlotTable.getTask(eid).getFailureCause(), instanceOf(PartitionNotFoundException.class));
	}
}
 
Example #18
Source File: ExecutionVertexDeploymentTest.java    From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode: ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = ExecutionGraphTestUtils.getExecutionJobVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

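		// register the produced partitions for this attempt and build the descriptor;
		// the allowLazyDeployment flag derived from the schedule mode is forwarded into
		// the produced partition descriptors (asserted below)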
		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID, scheduleMode.allowLazyDeployment()).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example #19
Source File: ExecutionGraphStopTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the stopping RPC call is sent upon stopping requests.
 */
@Test
public void testStopRpc() throws Exception {
	final JobID jid = new JobID();
	final JobVertex vertex = new JobVertex("vertex");
	vertex.setInvokableClass(NoOpInvokable.class);
	vertex.setParallelism(5);

	final ExecutionGraph graph = ExecutionGraphTestUtils.createSimpleTestGraph(jid, vertex);
	final Execution exec = graph.getJobVertex(vertex.getID()).getTaskVertices()[0].getCurrentExecutionAttempt();

	final TaskManagerGateway gateway = mock(TaskManagerGateway.class);
	when(gateway.submitTask(any(TaskDeploymentDescriptor.class), any(Time.class)))
			.thenReturn(CompletableFuture.completedFuture(Acknowledge.get()));
	when(gateway.stopTask(any(ExecutionAttemptID.class), any(Time.class)))
			.thenReturn(CompletableFuture.completedFuture(Acknowledge.get()));

	final SimpleSlot slot = ExecutionGraphTestUtils.createMockSimpleSlot(gateway);

	exec.tryAssignResource(slot);
	exec.deploy();
	exec.switchToRunning();
	assertEquals(ExecutionState.RUNNING, exec.getState());

	exec.stop();
	assertEquals(ExecutionState.RUNNING, exec.getState());

	verify(gateway, times(1)).stopTask(any(ExecutionAttemptID.class), any(Time.class));

	exec.markFinished();
	assertEquals(ExecutionState.FINISHED, exec.getState());
}
 
Example #20
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 * Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test(timeout = TEST_TIMEOUT)
public void testRemotePartitionNotFound() throws Exception {
	final int dataPort = NetUtils.getAvailablePort();
	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.DATA_PORT, dataPort);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);

	// Remote location (on the same TM though) for the partition
	NettyShuffleDescriptor sdd =
		NettyShuffleDescriptorBuilder.newBuilder().setDataPort(dataPort).buildRemote();
	TaskDeploymentDescriptor tdd = createReceiver(sdd);
	ExecutionAttemptID eid = tdd.getExecutionAttemptId();

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
	final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setSlotSize(2)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.addTaskManagerActionListener(eid, ExecutionState.FAILED, taskFailedFuture)
			.setConfiguration(config)
			.setLocalCommunication(false)
			.useRealNonMockShuffleEnvironment()
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable<Task> taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
		taskRunningFuture.get();

		taskFailedFuture.get();
		assertThat(taskSlotTable.getTask(eid).getFailureCause(), instanceOf(PartitionNotFoundException.class));
	}
}
 
Example #21
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable
) throws IOException {
	return createTestTaskDeploymentDescriptor(taskName, eid, abstractInvokable, 1);
}
 
Example #22
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable,
	int maxNumberOfSubtasks
) throws IOException {
	return createTestTaskDeploymentDescriptor(taskName,
		eid,
		abstractInvokable,
		maxNumberOfSubtasks,
		Collections.emptyList(),
		Collections.emptyList());
}
 
Example #23
Source File: ExecutionVertexDeploymentTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), new DirectScheduledExecutorService());

	IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

	ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));

	ExecutionEdge mockEdge = createMockExecutionEdge(1);

	result.getPartitions()[0].addConsumerGroup();
	result.getPartitions()[0].addConsumer(mockEdge, 0);

	SlotContext slotContext = mock(SlotContext.class);
	when(slotContext.getAllocationId()).thenReturn(new AllocationID());

	LogicalSlot slot = mock(LogicalSlot.class);
	when(slot.getAllocationId()).thenReturn(new AllocationID());

	for (ScheduleMode mode : ScheduleMode.values()) {
		vertex.getExecutionGraph().setScheduleMode(mode);

		TaskDeploymentDescriptor tdd = vertex.createDeploymentDescriptor(new ExecutionAttemptID(), slot, null, 1);

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(mode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example #24
Source File: TaskExecutor.java    From flink with Apache License 2.0
private void setupResultPartitionBookkeeping(TaskDeploymentDescriptor tdd, CompletableFuture<ExecutionState> terminationFuture) {
	final List<ResultPartitionID> partitionsRequiringRelease = tdd.getProducedPartitions().stream()
		// only blocking partitions require explicit release call
		.filter(d -> d.getPartitionType().isBlocking())
		.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
		// partitions without local resources don't store anything on the TaskExecutor
		.filter(d -> d.storesLocalResourcesOn().isPresent())
		.map(ShuffleDescriptor::getResultPartitionID)
		.collect(Collectors.toList());

	partitionTable.startTrackingPartitions(tdd.getJobId(), partitionsRequiringRelease);

	final CompletableFuture<ExecutionState> taskTerminationWithResourceCleanupFuture =
		terminationFuture.thenApplyAsync(
			executionState -> {
				if (executionState != ExecutionState.FINISHED) {
					partitionTable.stopTrackingPartitions(tdd.getJobId(), partitionsRequiringRelease);
				}
				return executionState;
			},
			getMainThreadExecutor());

	taskResultPartitionCleanupFuturesPerJob.compute(
		tdd.getJobId(),
		(jobID, completableFutures) -> {
			if (completableFutures == null) {
				completableFutures = new ArrayList<>(4);
			}

			completableFutures.add(taskTerminationWithResourceCleanupFuture);
			return completableFutures;
		});
}
 
Example #25
Source File: TaskExecutorOperatorEventHandlingTest.java    From flink with Apache License 2.0
private TaskSubmissionTestEnvironment createExecutorWithRunningTask(
		JobID jobId,
		ExecutionAttemptID executionAttemptId,
		Class<? extends AbstractInvokable> invokableClass) throws Exception {

	final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
			jobId, executionAttemptId, invokableClass);

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();

	final JobMasterId token = JobMasterId.generate();
	final TaskSubmissionTestEnvironment env = new TaskSubmissionTestEnvironment.Builder(jobId)
			.setJobMasterId(token)
			.setSlotSize(1)
			.addTaskManagerActionListener(executionAttemptId, ExecutionState.RUNNING, taskRunningFuture)
			.setMetricQueryServiceAddress(metricRegistry.getMetricQueryServiceGatewayRpcAddress())
			.setJobMasterGateway(new TestingJobMasterGatewayBuilder()
				.setFencingTokenSupplier(() -> token)
				.setOperatorEventSender((eio, oid, value) -> {
					throw new RuntimeException();
				})
				.build())
			.build();

	env.getTaskSlotTable().allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));

	final TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
	tmGateway.submitTask(tdd, env.getJobMasterId(), Time.seconds(10)).get();
	taskRunningFuture.get();

	return env;
}
 
Example #26
Source File: ExecutionVertexDeploymentTest.java    From flink with Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode: ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = getExecutionVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example #27
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
/**
 *  Tests that repeated local {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test(timeout = 10000L)
public void testLocalPartitionNotFound() throws Exception {
	ResourceID producerLocation = ResourceID.generate();
	NettyShuffleDescriptor shuffleDescriptor =
		createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), producerLocation);
	TaskDeploymentDescriptor tdd = createReceiver(shuffleDescriptor);
	ExecutionAttemptID eid = tdd.getExecutionAttemptId();

	Configuration config = new Configuration();
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
	config.setInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);

	final CompletableFuture<Void> taskRunningFuture = new CompletableFuture<>();
	final CompletableFuture<Void> taskFailedFuture = new CompletableFuture<>();

	try (TaskSubmissionTestEnvironment env =
		new TaskSubmissionTestEnvironment.Builder(jobId)
			.setResourceID(producerLocation)
			.setSlotSize(1)
			.addTaskManagerActionListener(eid, ExecutionState.RUNNING, taskRunningFuture)
			.addTaskManagerActionListener(eid, ExecutionState.FAILED, taskFailedFuture)
			.setConfiguration(config)
			.useRealNonMockShuffleEnvironment()
			.build()) {
		TaskExecutorGateway tmGateway = env.getTaskExecutorGateway();
		TaskSlotTable taskSlotTable = env.getTaskSlotTable();

		taskSlotTable.allocateSlot(0, jobId, tdd.getAllocationId(), Time.seconds(60));
		tmGateway.submitTask(tdd, env.getJobMasterId(), timeout).get();
		taskRunningFuture.get();

		taskFailedFuture.get();

		assertSame(taskSlotTable.getTask(eid).getExecutionState(), ExecutionState.FAILED);
		assertThat(taskSlotTable.getTask(eid).getFailureCause(), instanceOf(PartitionNotFoundException.class));
	}
}
 
Example #28
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
private TaskDeploymentDescriptor createTestTaskDeploymentDescriptor(
	String taskName,
	ExecutionAttemptID eid,
	Class<? extends AbstractInvokable> abstractInvokable
) throws IOException {
	return createTestTaskDeploymentDescriptor(taskName, eid, abstractInvokable, 1);
}
 
Example #29
Source File: SimpleAckingTaskManagerGateway.java    From flink with Apache License 2.0
public void setSubmitConsumer(Consumer<TaskDeploymentDescriptor> submitConsumer) {
	this.submitConsumer = submitConsumer;
}
 
Example #30
Source File: TaskExecutorSubmissionTest.java    From flink with Apache License 2.0
static TaskDeploymentDescriptor createTaskDeploymentDescriptor(
		JobID jobId,
		String jobName,
		ExecutionAttemptID executionAttemptId,
		SerializedValue<ExecutionConfig> serializedExecutionConfig,
		String taskName,
		int maxNumberOfSubtasks,
		int subtaskIndex,
		int numberOfSubtasks,
		int attemptNumber,
		Configuration jobConfiguration,
		Configuration taskConfiguration,
		String invokableClassName,
		List<ResultPartitionDeploymentDescriptor> producedPartitions,
		List<InputGateDeploymentDescriptor> inputGates,
		Collection<PermanentBlobKey> requiredJarFiles,
		Collection<URL> requiredClasspaths,
		int targetSlotNumber) throws IOException {

	JobInformation jobInformation = new JobInformation(
		jobId,
		jobName,
		serializedExecutionConfig,
		jobConfiguration,
		requiredJarFiles,
		requiredClasspaths);

	TaskInformation taskInformation = new TaskInformation(
		new JobVertexID(),
		taskName,
		numberOfSubtasks,
		maxNumberOfSubtasks,
		invokableClassName,
		taskConfiguration);

	SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(jobInformation);
	SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(taskInformation);

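	// both serialized blobs are embedded directly in the descriptor via NonOffloaded,
	// rather than being offloaded and fetched separately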
	return new TaskDeploymentDescriptor(
		jobId,
		new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
		new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobVertexInformation),
		executionAttemptId,
		new AllocationID(),
		subtaskIndex,
		attemptNumber,
		targetSlotNumber,
		null,
		producedPartitions,
		inputGates);
}