org.apache.flink.runtime.executiongraph.Execution Java Examples

The following examples show how to use org.apache.flink.runtime.executiongraph.Execution. Each example is taken from an open-source project; the originating source file and license are noted above each snippet.
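Before the examples, a minimal sketch of the common usage pattern (the ExecutionInspector class and describeCurrentAttempt helper are hypothetical, not part of Flink): an Execution is usually obtained from an ExecutionVertex and then queried for its attempt id and state, exactly as the snippets below do in both production and test code.

import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.Execution;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.executiongraph.ExecutionVertex;

/** Hypothetical helper, not part of Flink: summarizes the current attempt of a vertex. */
final class ExecutionInspector {

	private ExecutionInspector() {
	}

	/** Returns a one-line description of the vertex's current execution attempt. */
	static String describeCurrentAttempt(ExecutionVertex vertex) {
		// An ExecutionVertex exposes its latest attempt; earlier attempts only
		// exist after the task has been restarted.
		Execution execution = vertex.getCurrentExecutionAttempt();
		ExecutionAttemptID attemptId = execution.getAttemptId();
		ExecutionState state = execution.getState();
		return "attempt " + attemptId + " is in state " + state;
	}
}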
Example #1
Source File: OperatorCoordinatorHolder.java    From flink with Apache License 2.0
public static OperatorCoordinatorHolder create(
		SerializedValue<OperatorCoordinator.Provider> serializedProvider,
		ExecutionJobVertex jobVertex,
		ClassLoader classLoader) throws IOException, ClassNotFoundException {

	try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classLoader)) {
		final OperatorCoordinator.Provider provider = serializedProvider.deserializeValue(classLoader);
		final OperatorID opId = provider.getOperatorId();

		final BiFunction<SerializedValue<OperatorEvent>, Integer, CompletableFuture<Acknowledge>> eventSender =
			(serializedEvent, subtask) -> {
				final Execution executionAttempt = jobVertex.getTaskVertices()[subtask].getCurrentExecutionAttempt();
				return executionAttempt.sendOperatorEvent(opId, serializedEvent);
			};

		return create(
				opId,
				provider,
				eventSender,
				jobVertex.getName(),
				jobVertex.getParallelism(),
				jobVertex.getMaxParallelism());
	}
}
 
Example #2
Source File: RestartIndividualStrategy.java    From Flink-CEPplus with Apache License 2.0
@Override
public void onTaskFailure(Execution taskExecution, Throwable cause) {

	executionGraph.getJobMasterMainThreadExecutor().assertRunningInMainThread();

	// to better handle the lack of resources (potentially by a scale-in), we
	// make failures due to missing resources global failures 
	if (cause instanceof NoResourceAvailableException) {
		LOG.info("Not enough resources to schedule {} - triggering full recovery.", taskExecution);
		executionGraph.failGlobal(cause);
		return;
	}

	LOG.info("Recovering task failure for {} (#{}) via individual restart.", 
			taskExecution.getVertex().getTaskNameWithSubtaskIndex(), taskExecution.getAttemptNumber());

	numTaskFailures.inc();

	// trigger the restart once the task has reached its terminal state
	// Note: currently all tasks passed here are already in their terminal state,
	//       so we could actually avoid the future. We use it anyways because it is cheap and
	//       it helps to support better testing
	final CompletableFuture<ExecutionState> terminationFuture = taskExecution.getTerminalStateFuture();
	terminationFuture.thenRun(
		() -> performExecutionVertexRestart(taskExecution.getVertex(), taskExecution.getGlobalModVersion()));
}
 
Example #3
Source File: PartialInputChannelDeploymentDescriptor.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a channel deployment descriptor by completing the partition location.
 *
 * @see InputChannelDeploymentDescriptor
 */
public InputChannelDeploymentDescriptor createInputChannelDeploymentDescriptor(Execution consumerExecution) {
	checkNotNull(consumerExecution, "consumerExecution");

	TaskManagerLocation consumerLocation = consumerExecution.getAssignedResourceLocation();
	checkNotNull(consumerLocation, "Consumer connection info null");

	final ResultPartitionLocation partitionLocation;

	if (consumerLocation.equals(partitionTaskManagerLocation)) {
		partitionLocation = ResultPartitionLocation.createLocal();
	}
	else {
		partitionLocation = ResultPartitionLocation.createRemote(
				new ConnectionID(partitionTaskManagerLocation, partitionConnectionIndex));
	}

	return new InputChannelDeploymentDescriptor(partitionID, partitionLocation);
}
 
Example #4
Source File: InputChannelDeploymentDescriptorTest.java    From Flink-CEPplus with Apache License 2.0
private static ExecutionVertex mockExecutionVertex(ExecutionState state, ResourceID resourceId) {
	ExecutionVertex vertex = mock(ExecutionVertex.class);

	Execution exec = mock(Execution.class);
	when(exec.getState()).thenReturn(state);
	when(exec.getAttemptId()).thenReturn(new ExecutionAttemptID());

	if (resourceId != null) {
		LogicalSlot slot = mockSlot(resourceId);
		when(exec.getAssignedResource()).thenReturn(slot);
		when(vertex.getCurrentAssignedResource()).thenReturn(slot);
	} else {
		when(exec.getAssignedResource()).thenReturn(null); // no resource
		when(vertex.getCurrentAssignedResource()).thenReturn(null);
	}

	when(vertex.getCurrentExecutionAttempt()).thenReturn(exec);

	return vertex;
}
 
Example #5
Source File: StackTraceSampleCoordinatorTest.java    From Flink-CEPplus with Apache License 2.0
private ExecutionVertex mockExecutionVertex(
		ExecutionAttemptID executionId,
		ExecutionState state,
		boolean sendSuccess) {

	Execution exec = Mockito.mock(Execution.class);
	CompletableFuture<StackTraceSampleResponse> failedFuture = new CompletableFuture<>();
	failedFuture.completeExceptionally(new Exception("Send failed."));

	Mockito.when(exec.getAttemptId()).thenReturn(executionId);
	Mockito.when(exec.getState()).thenReturn(state);
	Mockito.when(exec.requestStackTraceSample(Matchers.anyInt(), Matchers.anyInt(), Matchers.any(Time.class), Matchers.anyInt(), Matchers.any(Time.class)))
		.thenReturn(
			sendSuccess ?
				CompletableFuture.completedFuture(Mockito.mock(StackTraceSampleResponse.class)) :
				failedFuture);

	ExecutionVertex vertex = Mockito.mock(ExecutionVertex.class);
	Mockito.when(vertex.getJobvertexId()).thenReturn(new JobVertexID());
	Mockito.when(vertex.getCurrentExecutionAttempt()).thenReturn(exec);

	return vertex;
}
 
Example #6
Source File: TaskDeploymentDescriptorFactory.java    From flink with Apache License 2.0
public static ShuffleDescriptor getConsumedPartitionShuffleDescriptor(
		ExecutionEdge edge,
		boolean allowUnknownPartitions) {
	IntermediateResultPartition consumedPartition = edge.getSource();
	Execution producer = consumedPartition.getProducer().getCurrentExecutionAttempt();

	ExecutionState producerState = producer.getState();
	Optional<ResultPartitionDeploymentDescriptor> consumedPartitionDescriptor =
		producer.getResultPartitionDeploymentDescriptor(consumedPartition.getPartitionId());

	ResultPartitionID consumedPartitionId = new ResultPartitionID(
		consumedPartition.getPartitionId(),
		producer.getAttemptId());

	return getConsumedPartitionShuffleDescriptor(
		consumedPartitionId,
		consumedPartition.getResultType(),
		consumedPartition.isConsumable(),
		producerState,
		allowUnknownPartitions,
		consumedPartitionDescriptor.orElse(null));
}
 
Example #7
Source File: RestartPipelinedRegionStrategy.java    From flink with Apache License 2.0
@Override
public void onTaskFailure(Execution taskExecution, Throwable cause) {
	final ExecutionVertex ev = taskExecution.getVertex();
	final FailoverRegion failoverRegion = vertexToRegion.get(ev);

	if (failoverRegion == null) {
		executionGraph.failGlobal(new FlinkException(
				"Can not find a failover region for the execution " + ev.getTaskNameWithSubtaskIndex(), cause));
	}
	else {
		LOG.info("Recovering task failure for {} #{} ({}) via restart of failover region",
				taskExecution.getVertex().getTaskNameWithSubtaskIndex(),
				taskExecution.getAttemptNumber(),
				taskExecution.getAttemptId());

		failoverRegion.onExecutionFail(taskExecution, cause);
	}
}
 
Example #8
Source File: RestartIndividualStrategy.java    From flink with Apache License 2.0
@Override
public void onTaskFailure(Execution taskExecution, Throwable cause) {

	executionGraph.getJobMasterMainThreadExecutor().assertRunningInMainThread();

	// to better handle the lack of resources (potentially by a scale-in), we
	// make failures due to missing resources global failures 
	if (cause instanceof NoResourceAvailableException) {
		LOG.info("Not enough resources to schedule {} - triggering full recovery.", taskExecution);
		executionGraph.failGlobal(cause);
		return;
	}

	LOG.info("Recovering task failure for {} (#{}) via individual restart.", 
			taskExecution.getVertex().getTaskNameWithSubtaskIndex(), taskExecution.getAttemptNumber());

	numTaskFailures.inc();

	// trigger the restart once the task has reached its terminal state
	// Note: currently all tasks passed here are already in their terminal state,
	//       so we could actually avoid the future. We use it anyways because it is cheap and
	//       it helps to support better testing
	final CompletableFuture<ExecutionState> terminationFuture = taskExecution.getTerminalStateFuture();
	terminationFuture.thenRun(
		() -> performExecutionVertexRestart(taskExecution.getVertex(), taskExecution.getGlobalModVersion()));
}
 
Example #9
Source File: SchedulerTestUtils.java    From flink with Apache License 2.0
public static Execution getTestVertex(Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures) {
	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);
	ExecutionVertex vertex = mock(ExecutionVertex.class);

	when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(preferredLocationFutures);
	when(vertex.getPreferredLocations()).thenReturn(preferredLocationFutures);
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.toString()).thenReturn("TEST-VERTEX");
	when(vertex.getJobVertex()).thenReturn(executionJobVertex);
	when(vertex.getJobvertexId()).thenReturn(new JobVertexID());

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);
	when(execution.calculatePreferredLocations(any(LocationPreferenceConstraint.class))).thenCallRealMethod();

	return execution;
}
 
Example #10
Source File: SchedulerTestUtils.java    From flink with Apache License 2.0
public static Execution getTestVertex(JobVertexID jid, int taskIndex, int numTasks, SlotSharingGroup slotSharingGroup) {
	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);
	ExecutionVertex vertex = mock(ExecutionVertex.class);

	when(executionJobVertex.getSlotSharingGroup()).thenReturn(slotSharingGroup);
	when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(Collections.emptyList());
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.getJobvertexId()).thenReturn(jid);
	when(vertex.getParallelSubtaskIndex()).thenReturn(taskIndex);
	when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(numTasks);
	when(vertex.getMaxParallelism()).thenReturn(numTasks);
	when(vertex.toString()).thenReturn("TEST-VERTEX");
	when(vertex.getTaskNameWithSubtaskIndex()).thenReturn("TEST-VERTEX");
	when(vertex.getJobVertex()).thenReturn(executionJobVertex);

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);
	
	return execution;
}
 
Example #11
Source File: CheckpointCoordinator.java    From flink with Apache License 2.0
/**
 * Snapshot task state.
 *
 * @param timestamp the timestamp of this checkpoint request
 * @param checkpointID the checkpoint id
 * @param checkpointStorageLocation the checkpoint location
 * @param props the checkpoint properties
 * @param executions the executions which should be triggered
 * @param advanceToEndOfTime Flag indicating if the source should inject a {@code MAX_WATERMARK}
 *                               in the pipeline to fire any registered event-time timers.
 */
private void snapshotTaskState(
	long timestamp,
	long checkpointID,
	CheckpointStorageLocation checkpointStorageLocation,
	CheckpointProperties props,
	Execution[] executions,
	boolean advanceToEndOfTime) {

	final CheckpointOptions checkpointOptions = new CheckpointOptions(
		props.getCheckpointType(),
		checkpointStorageLocation.getLocationReference(),
		isExactlyOnceMode,
		props.getCheckpointType() == CheckpointType.CHECKPOINT && unalignedCheckpointsEnabled);

	// send the messages to the tasks that trigger their checkpoint
	for (Execution execution: executions) {
		if (props.isSynchronous()) {
			execution.triggerSynchronousSavepoint(checkpointID, timestamp, checkpointOptions, advanceToEndOfTime);
		} else {
			execution.triggerCheckpoint(checkpointID, timestamp, checkpointOptions);
		}
	}
}
 
Example #12
Source File: BackPressureStatsTrackerImplTest.java    From flink with Apache License 2.0
private ExecutionVertex mockExecutionVertex(
		ExecutionJobVertex jobVertex,
		int subTaskIndex) {

	Execution exec = Mockito.mock(Execution.class);
	Mockito.when(exec.getAttemptId()).thenReturn(new ExecutionAttemptID());

	JobVertexID id = jobVertex.getJobVertexId();

	ExecutionVertex vertex = Mockito.mock(ExecutionVertex.class);
	Mockito.when(vertex.getJobvertexId()).thenReturn(id);
	Mockito.when(vertex.getCurrentExecutionAttempt()).thenReturn(exec);
	Mockito.when(vertex.getParallelSubtaskIndex()).thenReturn(subTaskIndex);

	return vertex;
}
 
Example #13
Source File: BackPressureRequestCoordinator.java    From flink with Apache License 2.0
/**
 * Requests back pressure stats from all the given executions. Responses that do
 * not arrive within the request timeout are ignored.
 */
private void requestBackPressure(Execution[] executions, int requestId) {
	assert Thread.holdsLock(lock);

	for (Execution execution: executions) {
		CompletableFuture<TaskBackPressureResponse> taskBackPressureFuture =
			execution.requestBackPressure(requestId, requestTimeout);

		taskBackPressureFuture.handleAsync(
			(TaskBackPressureResponse taskBackPressureResponse, Throwable throwable) -> {
				if (taskBackPressureResponse != null) {
					handleSuccessfulTaskBackPressureResponse(taskBackPressureResponse);
				} else {
					handleFailedTaskBackPressureResponse(requestId, throwable);
				}

				return null;
			},
			executor);
	}
}
 
Example #14
Source File: PartialInputChannelDeploymentDescriptor.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a partial input channel for the given partition and producing task.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition,
		Execution producer) {

	final ResultPartitionID partitionId = new ResultPartitionID(
			partition.getPartitionId(), producer.getAttemptId());

	final IntermediateResult result = partition.getIntermediateResult();

	final IntermediateDataSetID resultId = result.getId();
	final TaskManagerLocation partitionConnectionInfo = producer.getAssignedResourceLocation();
	final int partitionConnectionIndex = result.getConnectionIndex();

	return new PartialInputChannelDeploymentDescriptor(
			resultId, partitionId, partitionConnectionInfo, partitionConnectionIndex);
}
 
Example #15
Source File: SchedulerBase.java    From flink with Apache License 2.0
@Override
public ExecutionState requestPartitionState(
	final IntermediateDataSetID intermediateResultId,
	final ResultPartitionID resultPartitionId) throws PartitionProducerDisposedException {

	mainThreadExecutor.assertRunningInMainThread();

	final Execution execution = executionGraph.getRegisteredExecutions().get(resultPartitionId.getProducerId());
	if (execution != null) {
		return execution.getState();
	}
	else {
		final IntermediateResult intermediateResult =
			executionGraph.getAllIntermediateResults().get(intermediateResultId);

		if (intermediateResult != null) {
			// Try to find the producing execution
			Execution producerExecution = intermediateResult
				.getPartitionById(resultPartitionId.getPartitionId())
				.getProducer()
				.getCurrentExecutionAttempt();

			if (producerExecution.getAttemptId().equals(resultPartitionId.getProducerId())) {
				return producerExecution.getState();
			} else {
				throw new PartitionProducerDisposedException(resultPartitionId);
			}
		} else {
			throw new IllegalArgumentException("Intermediate data set with ID "
				+ intermediateResultId + " not found.");
		}
	}
}
 
Example #16
Source File: JobMaster.java    From Flink-CEPplus with Apache License 2.0
@Override
public CompletableFuture<ExecutionState> requestPartitionState(
		final IntermediateDataSetID intermediateResultId,
		final ResultPartitionID resultPartitionId) {

	final Execution execution = executionGraph.getRegisteredExecutions().get(resultPartitionId.getProducerId());
	if (execution != null) {
		return CompletableFuture.completedFuture(execution.getState());
	}
	else {
		final IntermediateResult intermediateResult =
				executionGraph.getAllIntermediateResults().get(intermediateResultId);

		if (intermediateResult != null) {
			// Try to find the producing execution
			Execution producerExecution = intermediateResult
					.getPartitionById(resultPartitionId.getPartitionId())
					.getProducer()
					.getCurrentExecutionAttempt();

			if (producerExecution.getAttemptId().equals(resultPartitionId.getProducerId())) {
				return CompletableFuture.completedFuture(producerExecution.getState());
			} else {
				return FutureUtils.completedExceptionally(new PartitionProducerDisposedException(resultPartitionId));
			}
		} else {
			return FutureUtils.completedExceptionally(new IllegalArgumentException("Intermediate data set with ID "
					+ intermediateResultId + " not found."));
		}
	}
}
 
Example #17
Source File: CheckpointCoordinator.java    From flink with Apache License 2.0
private void sendAbortedMessages(long checkpointId, long timeStamp) {
	// send notification of aborted checkpoints asynchronously.
	executor.execute(() -> {
		// send the "abort checkpoint" messages to necessary vertices.
		for (ExecutionVertex ev : tasksToCommitTo) {
			Execution ee = ev.getCurrentExecutionAttempt();
			if (ee != null) {
				ee.notifyCheckpointAborted(checkpointId, timeStamp);
			}
		}
	});
}
 
Example #18
Source File: SchedulerTestUtils.java    From flink with Apache License 2.0
public static Execution getTestVertexWithLocation(
		JobVertexID jid,
		int taskIndex,
		int numTasks,
		SlotSharingGroup slotSharingGroup,
		TaskManagerLocation... locations) {

	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);

	when(executionJobVertex.getSlotSharingGroup()).thenReturn(slotSharingGroup);

	ExecutionVertex vertex = mock(ExecutionVertex.class);

	Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = new ArrayList<>(locations.length);

	for (TaskManagerLocation location : locations) {
		preferredLocationFutures.add(CompletableFuture.completedFuture(location));
	}

	when(vertex.getJobVertex()).thenReturn(executionJobVertex);
	when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(preferredLocationFutures);
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.getJobvertexId()).thenReturn(jid);
	when(vertex.getParallelSubtaskIndex()).thenReturn(taskIndex);
	when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(numTasks);
	when(vertex.getMaxParallelism()).thenReturn(numTasks);
	when(vertex.toString()).thenReturn("TEST-VERTEX");

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);

	return execution;
}
 
Example #19
Source File: CheckpointCoordinator.java    From flink with Apache License 2.0
/**
 * Check if all tasks that we need to trigger are running. If not, abort the checkpoint.
 *
 * @return the executions that need to be triggered.
 * @throws CheckpointException if not all tasks that need to be triggered are currently running
 */
private Execution[] getTriggerExecutions() throws CheckpointException {
	Execution[] executions = new Execution[tasksToTrigger.length];
	for (int i = 0; i < tasksToTrigger.length; i++) {
		Execution ee = tasksToTrigger[i].getCurrentExecutionAttempt();
		if (ee == null) {
			LOG.info(
				"Checkpoint triggering task {} of job {} is not being executed at the moment. Aborting checkpoint.",
				tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
				job);
			throw new CheckpointException(
				CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
		} else if (ee.getState() == ExecutionState.RUNNING) {
			executions[i] = ee;
		} else {
			LOG.info(
				"Checkpoint triggering task {} of job {} is not in state {} but {} instead. Aborting checkpoint.",
				tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
				job,
				ExecutionState.RUNNING,
				ee.getState());
			throw new CheckpointException(
				CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
		}
	}
	return executions;
}
 
Example #20
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0
private static ExecutionVertex mockExecutionVertex(
	ExecutionAttemptID attemptID,
	JobVertexID jobVertexID,
	List<OperatorID> jobVertexIDs,
	int parallelism,
	int maxParallelism,
	ExecutionState state,
	ExecutionState ... successiveStates) {

	ExecutionVertex vertex = mock(ExecutionVertex.class);

	final Execution exec = spy(new Execution(
		mock(Executor.class),
		vertex,
		1,
		1L,
		1L,
		Time.milliseconds(500L)
	));
	when(exec.getAttemptId()).thenReturn(attemptID);
	when(exec.getState()).thenReturn(state, successiveStates);

	when(vertex.getJobvertexId()).thenReturn(jobVertexID);
	when(vertex.getCurrentExecutionAttempt()).thenReturn(exec);
	when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(parallelism);
	when(vertex.getMaxParallelism()).thenReturn(maxParallelism);

	ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);
	when(jobVertex.getOperatorIDs()).thenReturn(jobVertexIDs);
	
	when(vertex.getJobVertex()).thenReturn(jobVertex);

	return vertex;
}
 
Example #21
Source File: ScheduledUnit.java    From flink with Apache License 2.0
public ScheduledUnit(
	@Nullable Execution task,
	JobVertexID jobVertexId,
	@Nullable SlotSharingGroupId slotSharingGroupId,
	@Nullable CoLocationConstraint coLocationConstraint) {

	this.vertexExecution = task;
	this.jobVertexId = Preconditions.checkNotNull(jobVertexId);
	this.slotSharingGroupId = slotSharingGroupId;
	this.coLocationConstraint = coLocationConstraint;

}
 
Example #22
Source File: ScheduledUnit.java    From flink with Apache License 2.0
public ScheduledUnit(
		Execution task,
		@Nullable SlotSharingGroupId slotSharingGroupId,
		@Nullable CoLocationConstraint coLocationConstraint) {
	this(
		Preconditions.checkNotNull(task),
		task.getVertex().getJobvertexId(),
		slotSharingGroupId,
		coLocationConstraint);
}
 
Example #23
Source File: ScheduledUnit.java    From Flink-CEPplus with Apache License 2.0
public ScheduledUnit(
		Execution task,
		@Nullable SlotSharingGroupId slotSharingGroupId,
		@Nullable CoLocationConstraint coLocationConstraint) {
	this(
		Preconditions.checkNotNull(task),
		task.getVertex().getJobvertexId(),
		slotSharingGroupId,
		coLocationConstraint);
}
 
Example #24
Source File: ScheduledUnit.java    From flink with Apache License 2.0
public ScheduledUnit(Execution task) {
	this(
		Preconditions.checkNotNull(task),
		task.getVertex().getJobvertexId(),
		null,
		null);
}
 
Example #25
Source File: FailoverRegion.java    From flink with Apache License 2.0
public void onExecutionFail(Execution taskExecution, Throwable cause) {
	// TODO: check if need to failover the preceding region
	if (!executionGraph.getRestartStrategy().canRestart()) {
		// delegate the failure to a global fail that will check the restart strategy and not restart
		executionGraph.failGlobal(cause);
	}
	else {
		cancel(taskExecution.getGlobalModVersion());
	}
}
 
Example #26
Source File: CheckpointCoordinatorTestingUtils.java    From flink with Apache License 2.0
static ExecutionVertex mockExecutionVertex(Execution execution, JobVertexID vertexId, int subtask, int parallelism) {
	ExecutionVertex mock = mock(ExecutionVertex.class);
	when(mock.getJobvertexId()).thenReturn(vertexId);
	when(mock.getParallelSubtaskIndex()).thenReturn(subtask);
	when(mock.getCurrentExecutionAttempt()).thenReturn(execution);
	when(mock.getTotalNumberOfParallelSubtasks()).thenReturn(parallelism);
	when(mock.getMaxParallelism()).thenReturn(parallelism);
	return mock;
}
 
Example #27
Source File: LegacyScheduler.java    From flink with Apache License 2.0
private String retrieveTaskManagerLocation(ExecutionAttemptID executionAttemptID) {
	final Optional<Execution> currentExecution = Optional.ofNullable(executionGraph.getRegisteredExecutions().get(executionAttemptID));

	return currentExecution
		.map(Execution::getAssignedResourceLocation)
		.map(TaskManagerLocation::toString)
		.orElse("Unknown location");
}
 
Example #28
Source File: LegacyScheduler.java    From flink with Apache License 2.0
@Override
public ExecutionState requestPartitionState(
		final IntermediateDataSetID intermediateResultId,
		final ResultPartitionID resultPartitionId) throws PartitionProducerDisposedException {

	mainThreadExecutor.assertRunningInMainThread();

	final Execution execution = executionGraph.getRegisteredExecutions().get(resultPartitionId.getProducerId());
	if (execution != null) {
		return execution.getState();
	}
	else {
		final IntermediateResult intermediateResult =
			executionGraph.getAllIntermediateResults().get(intermediateResultId);

		if (intermediateResult != null) {
			// Try to find the producing execution
			Execution producerExecution = intermediateResult
				.getPartitionById(resultPartitionId.getPartitionId())
				.getProducer()
				.getCurrentExecutionAttempt();

			if (producerExecution.getAttemptId().equals(resultPartitionId.getProducerId())) {
				return producerExecution.getState();
			} else {
				throw new PartitionProducerDisposedException(resultPartitionId);
			}
		} else {
			throw new IllegalArgumentException("Intermediate data set with ID "
				+ intermediateResultId + " not found.");
		}
	}
}
 
Example #29
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0
private ExecutionVertex mockExecutionVertex(Execution execution, JobVertexID vertexId, int subtask, int parallelism) {
	ExecutionVertex mock = mock(ExecutionVertex.class);
	when(mock.getJobvertexId()).thenReturn(vertexId);
	when(mock.getParallelSubtaskIndex()).thenReturn(subtask);
	when(mock.getCurrentExecutionAttempt()).thenReturn(execution);
	when(mock.getTotalNumberOfParallelSubtasks()).thenReturn(parallelism);
	when(mock.getMaxParallelism()).thenReturn(parallelism);
	return mock;
}
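 
Taken together, the test utilities above reduce to one recurring Mockito pattern: stub an Execution with a known attempt id and state, then expose it as the current attempt of a mocked ExecutionVertex. A minimal sketch of that pattern (the mockRunningVertex helper is hypothetical and assumes Mockito is on the test classpath):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.Execution;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.executiongraph.ExecutionVertex;

/** Hypothetical test helper: mocks a vertex whose current attempt reports RUNNING. */
static ExecutionVertex mockRunningVertex() {
	// Stub an Execution that reports a fresh attempt id and the RUNNING state.
	Execution execution = mock(Execution.class);
	when(execution.getAttemptId()).thenReturn(new ExecutionAttemptID());
	when(execution.getState()).thenReturn(ExecutionState.RUNNING);

	// Expose the stub as the vertex's current attempt, as the examples above do.
	ExecutionVertex vertex = mock(ExecutionVertex.class);
	when(vertex.getCurrentExecutionAttempt()).thenReturn(execution);
	return vertex;
}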