Java Code Examples for org.apache.flink.runtime.testutils.DirectScheduledExecutorService

The following examples show how to use org.apache.flink.runtime.testutils.DirectScheduledExecutorService. They are extracted from open source projects; where available, the source project, source file, and license are noted above each example.
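Before the examples themselves, a minimal sketch may help clarify what the utility is used for: DirectScheduledExecutorService is a test executor that runs submitted tasks directly on the calling thread, which is why the tests below use it to make otherwise asynchronous code paths execute synchronously and deterministically. The snippet below is an illustrative sketch, not taken from any of the listed projects; it only assumes the standard java.util.concurrent.ExecutorService contract plus the same-thread execution behavior that the tests rely on.

import org.apache.flink.runtime.testutils.DirectScheduledExecutorService;

import java.util.concurrent.atomic.AtomicBoolean;

public class DirectExecutorSketch {

	public static void main(String[] args) {
		DirectScheduledExecutorService executor = new DirectScheduledExecutorService();
		AtomicBoolean ran = new AtomicBoolean(false);

		// With a "direct" executor the task runs immediately on the calling thread,
		// so it has already completed by the time execute() returns.
		executor.execute(() -> ran.set(true));
		System.out.println("task executed synchronously: " + ran.get()); // expected: true

		executor.shutdown();
	}
}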
Example 1
Source Project: Flink-CEPplus   Source File: YarnResourceManagerTest.java    License: Apache License 2.0
MockResourceManagerRuntimeServices() throws Exception {
	highAvailabilityServices = new TestingHighAvailabilityServices();
	rmLeaderElectionService = new TestingLeaderElectionService();
	highAvailabilityServices.setResourceManagerLeaderElectionService(rmLeaderElectionService);
	heartbeatServices = new TestingHeartbeatServices();
	metricRegistry = NoOpMetricRegistry.INSTANCE;
	slotManager = SlotManagerBuilder.newBuilder()
		.setScheduledExecutor(new ScheduledExecutorServiceAdapter(new DirectScheduledExecutorService()))
		.setTaskManagerRequestTimeout(Time.seconds(10))
		.setSlotRequestTimeout(Time.seconds(10))
		.setTaskManagerTimeout(Time.minutes(1))
		.build();
	jobLeaderIdService = new JobLeaderIdService(
			highAvailabilityServices,
			rpcService.getScheduledExecutor(),
			Time.minutes(5L));
}
 
Example 2
Source Project: flink   Source File: ExecutionGraphTestUtils.java    License: Apache License 2.0
public static Execution getExecution(
		final JobVertexID jid,
		final int subtaskIndex,
		final int numTasks,
		final SlotSharingGroup slotSharingGroup,
		@Nullable final TaskManagerLocation... locations) throws Exception {

	final ExecutionJobVertex ejv = getExecutionJobVertex(
		jid,
		numTasks,
		slotSharingGroup,
		new DirectScheduledExecutorService(),
		ScheduleMode.LAZY_FROM_SOURCES);
	final TestExecutionVertex ev = new TestExecutionVertex(
		ejv,
		subtaskIndex,
		new IntermediateResult[0],
		DEFAULT_TIMEOUT);

	if (locations != null) {
		ev.setPreferredLocationFutures(mapToPreferredLocationFutures(locations));
	}

	return ev.getCurrentExecutionAttempt();
}
 
Example 3
@Test
public void testDeployFailedSynchronous() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		final LogicalSlot slot = new TestingLogicalSlot(new SubmitFailingSimpleAckingTaskManagerGateway());

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

		vertex.deployToSlot(slot);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertNotNull(vertex.getFailureCause());
		assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 4
@Test
public void testFailExternallyDuringDeploy() {
	try {
		final JobVertexID jid = new JobVertexID();

		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlot(new SubmitBlockingSimpleAckingTaskManagerGateway());

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
		vertex.deployToSlot(testingLogicalSlot);
		assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());

		Exception testError = new Exception("test error");
		vertex.fail(testError);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertEquals(testError, vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 5
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), new DirectScheduledExecutorService());

	IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

	ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));

	ExecutionEdge mockEdge = createMockExecutionEdge(1);

	result.getPartitions()[0].addConsumerGroup();
	result.getPartitions()[0].addConsumer(mockEdge, 0);

	SlotContext slotContext = mock(SlotContext.class);
	when(slotContext.getAllocationId()).thenReturn(new AllocationID());

	LogicalSlot slot = mock(LogicalSlot.class);
	when(slot.getAllocationId()).thenReturn(new AllocationID());

	for (ScheduleMode mode : ScheduleMode.values()) {
		vertex.getExecutionGraph().setScheduleMode(mode);

		TaskDeploymentDescriptor tdd = vertex.createDeploymentDescriptor(new ExecutionAttemptID(), slot, null, 1);

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(mode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example 6
Source Project: Flink-CEPplus   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunning() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlot(new CancelSequenceSimpleAckingTaskManagerGateway(1));

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();
		vertex.getCurrentExecutionAttempt().completeCancelling(); // response by task manager once actually canceled

		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 7
Source Project: Flink-CEPplus   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunningDidNotFindTask() {
	// this may happen when the task finished or failed while the call was in progress
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlot(new CancelSequenceSimpleAckingTaskManagerGateway(1));

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		assertEquals(ExecutionState.CANCELING, vertex.getExecutionState());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 8
Source Project: Flink-CEPplus   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelCallFails() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlot(new CancelSequenceSimpleAckingTaskManagerGateway(0));

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		// Callback fails, leading to CANCELED
		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 9
private static IntermediateResult createResult(
		ResultPartitionType resultPartitionType,
		int producerCount) throws Exception {

	ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), new DirectScheduledExecutorService());
	IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, producerCount, resultPartitionType);
	for (int i = 0; i < producerCount; i++) {
		// Generate result partition in the result
		new ExecutionVertex(jobVertex, i, new IntermediateResult[]{result}, Time.minutes(1));
	}

	return result;
}
 
Example 10
Source Project: flink   Source File: MockResourceManagerRuntimeServices.java    License: Apache License 2.0
public MockResourceManagerRuntimeServices(RpcService rpcService, Time timeout) {
	this(rpcService, timeout, SlotManagerBuilder.newBuilder()
		.setScheduledExecutor(new ScheduledExecutorServiceAdapter(new DirectScheduledExecutorService()))
		.setTaskManagerRequestTimeout(Time.seconds(10))
		.setSlotRequestTimeout(Time.seconds(10))
		.setTaskManagerTimeout(Time.minutes(1))
		.build());
}
 
Example 11
Source Project: flink   Source File: ExecutionGraphDeploymentTest.java    License: Apache License 2.0
private Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> setupExecution(JobVertex v1, int dop1, JobVertex v2, int dop2) throws Exception {
	v1.setParallelism(dop1);
	v2.setParallelism(dop2);

	v1.setInvokableClass(BatchTask.class);
	v2.setInvokableClass(BatchTask.class);

	final ArrayDeque<CompletableFuture<LogicalSlot>> slotFutures = new ArrayDeque<>();
	for (int i = 0; i < dop1 + dop2; i++) {
		slotFutures.addLast(CompletableFuture.completedFuture(new TestingLogicalSlotBuilder().createTestingLogicalSlot()));
	}

	final SlotProvider slotProvider = new TestingSlotProvider(ignore -> slotFutures.removeFirst());

	DirectScheduledExecutorService executorService = new DirectScheduledExecutorService();

	// execution graph that executes actions synchronously
	ExecutionGraph eg = createExecutionGraphWithoutQueuedScheduling(new JobID(), slotProvider, executorService, TestingUtils.defaultExecutor());
	checkJobOffloaded(eg);

	eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

	List<JobVertex> ordered = Arrays.asList(v1, v2);
	eg.attachJobGraph(ordered);

	// schedule, this triggers mock deployment
	eg.scheduleForExecution();

	Map<ExecutionAttemptID, Execution> executions = eg.getRegisteredExecutions();
	assertEquals(dop1 + dop2, executions.size());

	return new Tuple2<>(eg, executions);
}
 
Example 12
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
@Test
public void testDeployFailedSynchronous() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		final LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitFailingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

		vertex.deployToSlot(slot);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertNotNull(vertex.getFailureCause());
		assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 13
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
@Test
public void testFailExternallyDuringDeploy() {
	try {
		final JobVertexID jid = new JobVertexID();

		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitBlockingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
		vertex.deployToSlot(testingLogicalSlot);
		assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());

		Exception testError = new Exception("test error");
		vertex.fail(testError);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertEquals(testError, vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 14
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode : ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = getExecutionVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example 15
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunning() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(1)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();
		vertex.getCurrentExecutionAttempt().completeCancelling(); // response by task manager once actually canceled

		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 16
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunningDidNotFindTask() {
	// this may happen when the task finished or failed while the call was in progress
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(1)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		assertEquals(ExecutionState.CANCELING, vertex.getExecutionState());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 17
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelCallFails() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = getExecutionVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(0)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		// Callback fails, leading to CANCELED
		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 18
Source Project: flink   Source File: IntermediateResultPartitionTest.java    License: Apache License 2.0
private static IntermediateResult createResult(
		ResultPartitionType resultPartitionType,
		int producerCount) throws Exception {

	ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), new DirectScheduledExecutorService());
	IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, producerCount, resultPartitionType);
	for (int i = 0; i < producerCount; i++) {
		// Generate result partition in the result
		new ExecutionVertex(jobVertex, i, new IntermediateResult[]{result}, Time.minutes(1));
	}

	return result;
}
 
Example 19
Source Project: flink   Source File: MockResourceManagerRuntimeServices.java    License: Apache License 2.0
public MockResourceManagerRuntimeServices(RpcService rpcService, Time timeout) {
	this(rpcService, timeout, SlotManagerBuilder.newBuilder()
		.setScheduledExecutor(new ScheduledExecutorServiceAdapter(new DirectScheduledExecutorService()))
		.setTaskManagerRequestTimeout(Time.seconds(10))
		.setSlotRequestTimeout(Time.seconds(10))
		.setTaskManagerTimeout(Time.minutes(1))
		.build());
}
 
Example 20
Source Project: flink   Source File: ExecutionGraphDeploymentTest.java    License: Apache License 2.0
private Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> setupExecution(JobVertex v1, int dop1, JobVertex v2, int dop2) throws Exception {
	v1.setParallelism(dop1);
	v2.setParallelism(dop2);

	v1.setInvokableClass(BatchTask.class);
	v2.setInvokableClass(BatchTask.class);

	final ArrayDeque<CompletableFuture<LogicalSlot>> slotFutures = new ArrayDeque<>();
	for (int i = 0; i < dop1 + dop2; i++) {
		slotFutures.addLast(CompletableFuture.completedFuture(new TestingLogicalSlotBuilder().createTestingLogicalSlot()));
	}

	final SlotProvider slotProvider = new TestingSlotProvider(ignore -> slotFutures.removeFirst());

	DirectScheduledExecutorService executorService = new DirectScheduledExecutorService();

	// execution graph that executes actions synchronously
	ExecutionGraph eg = TestingExecutionGraphBuilder
		.newBuilder()
		.setFutureExecutor(executorService)
		.setSlotProvider(slotProvider)
		.setBlobWriter(blobWriter)
		.build();

	checkJobOffloaded(eg);

	eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

	List<JobVertex> ordered = Arrays.asList(v1, v2);
	eg.attachJobGraph(ordered);

	// schedule, this triggers mock deployment
	eg.scheduleForExecution();

	Map<ExecutionAttemptID, Execution> executions = eg.getRegisteredExecutions();
	assertEquals(dop1 + dop2, executions.size());

	return new Tuple2<>(eg, executions);
}
 
Example 21
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
@Test
public void testDeployFailedSynchronous() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		final LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitFailingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

		vertex.deployToSlot(slot);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertNotNull(vertex.getFailureCause());
		assertTrue(vertex.getFailureCause().getMessage().contains(ERROR_MESSAGE));

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 22
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
@Test
public void testFailExternallyDuringDeploy() {
	try {
		final JobVertexID jid = new JobVertexID();

		final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
			AkkaUtils.getDefaultTimeout());

		TestingLogicalSlot testingLogicalSlot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new SubmitBlockingSimpleAckingTaskManagerGateway()).createTestingLogicalSlot();

		assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
		vertex.deployToSlot(testingLogicalSlot);
		assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());

		Exception testError = new Exception("test error");
		vertex.fail(testError);

		assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
		assertEquals(testError, vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 23
Source Project: flink   Source File: ExecutionVertexDeploymentTest.java    License: Apache License 2.0
/**
 * Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
 */
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
	for (ScheduleMode scheduleMode : ScheduleMode.values()) {
		ExecutionJobVertex jobVertex = ExecutionGraphTestUtils.getExecutionJobVertex(
			new JobVertexID(),
			new DirectScheduledExecutorService(),
			scheduleMode);

		IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);

		ExecutionAttemptID attemptID = new ExecutionAttemptID();
		ExecutionVertex vertex =
			new ExecutionVertex(jobVertex, 0, new IntermediateResult[]{result}, Time.minutes(1));
		TaskDeploymentDescriptorFactory tddFactory =
			TaskDeploymentDescriptorFactory.fromExecutionVertex(vertex, 1);

		ExecutionEdge mockEdge = createMockExecutionEdge(1);

		result.getPartitions()[0].addConsumerGroup();
		result.getPartitions()[0].addConsumer(mockEdge, 0);

		TaskManagerLocation location =
			new TaskManagerLocation(ResourceID.generate(), InetAddress.getLoopbackAddress(), 1);

		TaskDeploymentDescriptor tdd = tddFactory.createDeploymentDescriptor(
			new AllocationID(),
			0,
			null,
			Execution.registerProducedPartitions(vertex, location, attemptID, scheduleMode.allowLazyDeployment()).get().values());

		Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();

		assertEquals(1, producedPartitions.size());
		ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
		assertEquals(scheduleMode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
	}
}
 
Example 24
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunning() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(1)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();
		vertex.getCurrentExecutionAttempt().completeCancelling(); // response by task manager once actually canceled

		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELED) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 25
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelFromRunningDidNotFindTask() {
	// this may happen when the task finished or failed while the call was in progress
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(1)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		assertEquals(ExecutionState.CANCELING, vertex.getExecutionState());

		assertNull(vertex.getFailureCause());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 26
Source Project: flink   Source File: ExecutionVertexCancelTest.java    License: Apache License 2.0
@Test
public void testCancelCallFails() {
	try {
		final JobVertexID jid = new JobVertexID();
		final ExecutionJobVertex ejv = ExecutionGraphTestUtils.getExecutionJobVertex(jid, new DirectScheduledExecutorService());

		final ExecutionVertex vertex = new ExecutionVertex(ejv, 0, new IntermediateResult[0],
				AkkaUtils.getDefaultTimeout());

		LogicalSlot slot = new TestingLogicalSlotBuilder().setTaskManagerGateway(new CancelSequenceSimpleAckingTaskManagerGateway(0)).createTestingLogicalSlot();

		setVertexResource(vertex, slot);
		setVertexState(vertex, ExecutionState.RUNNING);

		assertEquals(ExecutionState.RUNNING, vertex.getExecutionState());

		vertex.cancel();

		// Callback fails, leading to CANCELED
		assertEquals(ExecutionState.CANCELED, vertex.getExecutionState());

		assertFalse(slot.isAlive());

		assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
		assertTrue(vertex.getStateTimestamp(ExecutionState.CANCELING) > 0);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 27
Source Project: flink   Source File: ExecutionJobVertexTest.java    License: Apache License 2.0
private static ExecutionGraph createExecutionGraph() throws JobException, JobExecutionException {
	final ExecutionGraph executionGraph = TestingExecutionGraphBuilder
		.newBuilder()
		.setFutureExecutor(new DirectScheduledExecutorService())
		.build();
	executionGraph.transitionToRunning();
	return executionGraph;
}
 
Example 28
Source Project: flink   Source File: IntermediateResultPartitionTest.java    License: Apache License 2.0
private static IntermediateResult createResult(
		ResultPartitionType resultPartitionType,
		int producerCount) throws Exception {

	ExecutionJobVertex jobVertex = getExecutionJobVertex(new JobVertexID(), new DirectScheduledExecutorService());
	IntermediateResult result =
			new IntermediateResult(new IntermediateDataSetID(), jobVertex, producerCount, resultPartitionType);
	for (int i = 0; i < producerCount; i++) {
		// Generate result partition in the result
		new ExecutionVertex(jobVertex, i, new IntermediateResult[]{result}, Time.minutes(1));
	}

	return result;
}
 
Example 29
/**
 * Tests that a blocking batch job fails if there are not enough resources left to schedule the
 * succeeding tasks. This test case is related to [FLINK-4296] where finished producing tasks
 * swallow the fail exception when scheduling a consumer task.
 */
@Test
public void testNoResourceAvailableFailure() throws Exception {
	final JobID jobId = new JobID();
	JobVertex v1 = new JobVertex("source");
	JobVertex v2 = new JobVertex("sink");

	int dop1 = 1;
	int dop2 = 1;

	v1.setParallelism(dop1);
	v2.setParallelism(dop2);

	v1.setInvokableClass(BatchTask.class);
	v2.setInvokableClass(BatchTask.class);

	v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

	final ArrayDeque<CompletableFuture<LogicalSlot>> slotFutures = new ArrayDeque<>();
	for (int i = 0; i < dop1; i++) {
		slotFutures.addLast(CompletableFuture.completedFuture(new TestingLogicalSlot()));
	}

	final SlotProvider slotProvider = new TestingSlotProvider(ignore -> slotFutures.removeFirst());

	final JobInformation jobInformation = new DummyJobInformation(
		jobId,
		"failing test job");

	DirectScheduledExecutorService directExecutor = new DirectScheduledExecutorService();

	// execution graph that executes actions synchronously
	ExecutionGraph eg = new ExecutionGraph(
		jobInformation,
		directExecutor,
		TestingUtils.defaultExecutor(),
		AkkaUtils.getDefaultTimeout(),
		new NoRestartStrategy(),
		new RestartAllStrategy.Factory(),
		slotProvider,
		ExecutionGraph.class.getClassLoader(),
		blobWriter,
		AkkaUtils.getDefaultTimeout());

	eg.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

	checkJobOffloaded(eg);

	eg.setQueuedSchedulingAllowed(false);

	List<JobVertex> ordered = Arrays.asList(v1, v2);
	eg.attachJobGraph(ordered);

	// schedule, this triggers mock deployment
	eg.scheduleForExecution();

	ExecutionAttemptID attemptID = eg.getJobVertex(v1.getID()).getTaskVertices()[0].getCurrentExecutionAttempt().getAttemptId();
	eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.RUNNING));
	eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.FINISHED, null));

	assertEquals(JobStatus.FAILED, eg.getState());
}
 
Example 30
private Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> setupExecution(JobVertex v1, int dop1, JobVertex v2, int dop2) throws Exception {
	final JobID jobId = new JobID();

	v1.setParallelism(dop1);
	v2.setParallelism(dop2);

	v1.setInvokableClass(BatchTask.class);
	v2.setInvokableClass(BatchTask.class);

	final ArrayDeque<CompletableFuture<LogicalSlot>> slotFutures = new ArrayDeque<>();
	for (int i = 0; i < dop1 + dop2; i++) {
		slotFutures.addLast(CompletableFuture.completedFuture(new TestingLogicalSlot()));
	}

	final SlotProvider slotProvider = new TestingSlotProvider(ignore -> slotFutures.removeFirst());

	final JobInformation jobInformation = new DummyJobInformation(
		jobId,
		"some job");

	DirectScheduledExecutorService executorService = new DirectScheduledExecutorService();

	// execution graph that executes actions synchronously
	ExecutionGraph eg = new ExecutionGraph(
		jobInformation,
		executorService,
		TestingUtils.defaultExecutor(),
		AkkaUtils.getDefaultTimeout(),
		new NoRestartStrategy(),
		new RestartAllStrategy.Factory(),
		slotProvider,
		ExecutionGraph.class.getClassLoader(),
		blobWriter,
		AkkaUtils.getDefaultTimeout());
	checkJobOffloaded(eg);

	eg.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());

	eg.setQueuedSchedulingAllowed(false);

	List<JobVertex> ordered = Arrays.asList(v1, v2);
	eg.attachJobGraph(ordered);

	// schedule, this triggers mock deployment
	eg.scheduleForExecution();

	Map<ExecutionAttemptID, Execution> executions = eg.getRegisteredExecutions();
	assertEquals(dop1 + dop2, executions.size());

	return new Tuple2<>(eg, executions);
}