org.apache.flink.runtime.executiongraph.ExecutionJobVertex Java Examples

The following examples show how to use org.apache.flink.runtime.executiongraph.ExecutionJobVertex. Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
Example #1
Source File: SchedulerTestUtils.java    From Flink-CEPplus with Apache License 2.0
public static Execution getTestVertex(JobVertexID jid, int taskIndex, int numTasks, SlotSharingGroup slotSharingGroup) {
	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);
	ExecutionVertex vertex = mock(ExecutionVertex.class);

	when(executionJobVertex.getSlotSharingGroup()).thenReturn(slotSharingGroup);
	when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(Collections.emptyList());
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.getJobvertexId()).thenReturn(jid);
	when(vertex.getParallelSubtaskIndex()).thenReturn(taskIndex);
	when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(numTasks);
	when(vertex.getMaxParallelism()).thenReturn(numTasks);
	when(vertex.toString()).thenReturn("TEST-VERTEX");
	when(vertex.getTaskNameWithSubtaskIndex()).thenReturn("TEST-VERTEX");
	when(vertex.getJobVertex()).thenReturn(executionJobVertex);

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);
	
	return execution;
}
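
A minimal usage sketch (assuming JUnit's assertEquals and the helper above; all values illustrative): the mocked Execution carries just enough metadata for scheduler tests.

Execution execution = SchedulerTestUtils.getTestVertex(
		new JobVertexID(), 0, 4, new SlotSharingGroup());

// the stubs above fix the task name and the subtask index
assertEquals("TEST-VERTEX", execution.getVertex().getTaskNameWithSubtaskIndex());
assertEquals(0, execution.getVertex().getParallelSubtaskIndex());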
 
Example #2
Source File: RestartPipelinedRegionStrategy.java    From Flink-CEPplus with Apache License 2.0
private void makeAllOneRegion(List<ExecutionJobVertex> jobVertices) {
	LOG.warn("Cannot decompose ExecutionGraph into individual failover regions due to use of " +
			"Co-Location constraints (iterations). Job will fail over as one holistic unit.");

	final ArrayList<ExecutionVertex> allVertices = new ArrayList<>();

	for (ExecutionJobVertex ejv : jobVertices) {

	// pre-size the list to avoid repeated incremental growth
		allVertices.ensureCapacity(allVertices.size() + ejv.getParallelism());

		allVertices.addAll(Arrays.asList(ejv.getTaskVertices()));
	}

	final FailoverRegion singleRegion = createFailoverRegion(executionGraph, allVertices);
	for (ExecutionVertex ev : allVertices) {
		vertexToRegion.put(ev, singleRegion);
	}
}
 
Example #3
Source File: CheckpointMetadataLoadingTest.java    From flink with Apache License 2.0
/**
 * Tests that savepoint loading fails when only non-restored coordinator state is present
 * and non-restored state is not allowed.
 */
@Test
public void testUnmatchedCoordinatorOnlyStateFails() throws Exception {
	final OperatorID operatorID = new OperatorID();
	final int maxParallelism = 1234;

	final OperatorState state = new OperatorState(operatorID, maxParallelism / 2, maxParallelism);
	state.setCoordinatorState(new ByteStreamStateHandle("coordinatorState", new byte[0]));

	final CompletedCheckpointStorageLocation testSavepoint = createSavepointWithOperatorState(42L, state);
	final Map<JobVertexID, ExecutionJobVertex> tasks = Collections.emptyMap();

	try {
		Checkpoints.loadAndValidateCheckpoint(new JobID(), tasks, testSavepoint, cl, false);
		fail("Did not throw expected Exception");
	} catch (IllegalStateException expected) {
		assertTrue(expected.getMessage().contains("allowNonRestoredState"));
	}
}
 
Example #4
Source File: CheckpointMetadataLoadingTest.java    From flink with Apache License 2.0
/**
 * Tests that savepoint loading fails when there is non-restored state, but it is not allowed.
 */
@Test
public void testNonRestoredStateWhenDisallowed() throws Exception {
	final OperatorID operatorId = new OperatorID();
	final int parallelism = 9;

	final CompletedCheckpointStorageLocation testSavepoint = createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);
	final Map<JobVertexID, ExecutionJobVertex> tasks = Collections.emptyMap();

	try {
		Checkpoints.loadAndValidateCheckpoint(new JobID(), tasks, testSavepoint, cl, false);
		fail("Did not throw expected Exception");
	} catch (IllegalStateException expected) {
		assertTrue(expected.getMessage().contains("allowNonRestoredState"));
	}
}
 
Example #5
Source File: StateAssignmentOperation.java    From flink with Apache License 2.0
/**
 * Verifies that all operator states can be mapped to an execution job vertex.
 *
 * @param allowNonRestoredState if false, an exception is thrown when a state cannot be mapped
 * @param operatorStates operator states to map
 * @param tasks tasks to map the states to
 */
private static void checkStateMappingCompleteness(
		boolean allowNonRestoredState,
		Map<OperatorID, OperatorState> operatorStates,
		Map<JobVertexID, ExecutionJobVertex> tasks) {

	Set<OperatorID> allOperatorIDs = new HashSet<>();
	for (ExecutionJobVertex executionJobVertex : tasks.values()) {
		allOperatorIDs.addAll(executionJobVertex.getOperatorIDs());
	}
	for (Map.Entry<OperatorID, OperatorState> operatorGroupStateEntry : operatorStates.entrySet()) {
		OperatorState operatorState = operatorGroupStateEntry.getValue();
		//----------------------------------------find operator for state---------------------------------------------

		if (!allOperatorIDs.contains(operatorGroupStateEntry.getKey())) {
			if (allowNonRestoredState) {
				LOG.info("Skipped checkpoint state for operator {}.", operatorState.getOperatorID());
			} else {
				throw new IllegalStateException("There is no operator for the state " + operatorState.getOperatorID());
			}
		}
	}
}
 
Example #6
Source File: CheckpointCoordinatorTestingUtils.java    From flink with Apache License 2.0
public static void verifyStateRestore(
	JobVertexID jobVertexID, ExecutionJobVertex executionJobVertex,
	List<KeyGroupRange> keyGroupPartitions) throws Exception {

	for (int i = 0; i < executionJobVertex.getParallelism(); i++) {

		JobManagerTaskRestore taskRestore = executionJobVertex.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
		Assert.assertEquals(1L, taskRestore.getRestoreCheckpointId());
		TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();

		OperatorSubtaskState operatorState = stateSnapshot.getSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID));

		ChainedStateHandle<OperatorStateHandle> expectedOpStateBackend =
			generateChainedPartitionableStateHandle(jobVertexID, i, 2, 8, false);

		assertTrue(CommonTestUtils.isStreamContentEqual(
			expectedOpStateBackend.get(0).openInputStream(),
			operatorState.getManagedOperatorState().iterator().next().openInputStream()));

		KeyGroupsStateHandle expectPartitionedKeyGroupState = generateKeyGroupState(
			jobVertexID, keyGroupPartitions.get(i), false);
		compareKeyedState(Collections.singletonList(expectPartitionedKeyGroupState), operatorState.getManagedKeyedState());
	}
}
 
Example #7
Source File: StateAssignmentOperation.java    From flink with Apache License 2.0
/**
 * Verifies that all operator states can be mapped to an execution job vertex.
 *
 * @param allowNonRestoredState if false, an exception is thrown when a state cannot be mapped
 * @param operatorStates operator states to map
 * @param tasks tasks to map the states to
 */
private static void checkStateMappingCompleteness(
		boolean allowNonRestoredState,
		Map<OperatorID, OperatorState> operatorStates,
		Set<ExecutionJobVertex> tasks) {

	Set<OperatorID> allOperatorIDs = new HashSet<>();
	for (ExecutionJobVertex executionJobVertex : tasks) {
		for (OperatorIDPair operatorIDPair : executionJobVertex.getOperatorIDs()) {
			allOperatorIDs.add(operatorIDPair.getGeneratedOperatorID());
			operatorIDPair.getUserDefinedOperatorID().ifPresent(allOperatorIDs::add);
		}
	}
	for (Map.Entry<OperatorID, OperatorState> operatorGroupStateEntry : operatorStates.entrySet()) {
		OperatorState operatorState = operatorGroupStateEntry.getValue();
		//----------------------------------------find operator for state---------------------------------------------

		if (!allOperatorIDs.contains(operatorGroupStateEntry.getKey())) {
			if (allowNonRestoredState) {
				LOG.info("Skipped checkpoint state for operator {}.", operatorState.getOperatorID());
			} else {
				throw new IllegalStateException("There is no operator for the state " + operatorState.getOperatorID());
			}
		}
	}
}
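
A short sketch of how the ID set above is populated, assuming only the public OperatorIDPair factories (the IDs are illustrative):

// Both the generated ID and the optional user-defined ID count as known restore targets.
OperatorIDPair pair = OperatorIDPair.of(new OperatorID(), new OperatorID());

Set<OperatorID> known = new HashSet<>();
known.add(pair.getGeneratedOperatorID());
pair.getUserDefinedOperatorID().ifPresent(known::add); // present here, so both IDs are added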
 
Example #8
Source File: FlinkUtils.java    From flink-crawler with Apache License 2.0
/**
 * Return a String key that will get partitioned to the target <operatorIndex>, given the workflow's
 * <maxParallelism> (for key groups) and the operator <parallelism>.
 * 
 * @param format
 *            - format for key that we'll append to (must have one %d param in it)
 * @param maxParallelism
 * @param parallelism
 * @param operatorIndex
 * @return a String suitable for use in a record as the key.
 */
public static String makeKeyForOperatorIndex(String format, int maxParallelism, int parallelism,
        int operatorIndex) {
    if (!format.contains("%d")) {
        throw new IllegalArgumentException("Format string must contain %d");
    }

    if (maxParallelism == ExecutionJobVertex.VALUE_NOT_SET) {
        maxParallelism = KeyGroupRangeAssignment.computeDefaultMaxParallelism(parallelism);
    }

    for (int i = 0; i < maxParallelism * 2; i++) {
        String key = String.format(format, i);
        int index = getOperatorIndexForKey(key, maxParallelism, parallelism);
        if (index == operatorIndex) {
            return key;
        }
    }

    throw new RuntimeException(String.format(
            "Unable to find key for target operator index %d (max parallelism = %d, parallelism = %d",
            operatorIndex, maxParallelism, parallelism));
}
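
A hedged usage sketch: generating a key that routes to subtask 3 of a 10-way operator (all numbers illustrative):

// With maxParallelism 128, the helper probes "key-0", "key-1", ... until one
// hashes into a key group assigned to operator index 3.
String key = FlinkUtils.makeKeyForOperatorIndex("key-%d", 128, 10, 3);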
 
Example #9
Source File: CheckpointMetadataLoadingTest.java    From flink with Apache License 2.0
/**
 * Tests that savepoint loading fails when there is a max-parallelism mismatch.
 */
@Test
public void testMaxParallelismMismatch() throws Exception {
	final OperatorID operatorId = new OperatorID();
	final int parallelism = 128128;

	final CompletedCheckpointStorageLocation testSavepoint = createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);
	final Map<JobVertexID, ExecutionJobVertex> tasks = createTasks(operatorId, parallelism, parallelism + 1);

	try {
		Checkpoints.loadAndValidateCheckpoint(new JobID(), tasks, testSavepoint, cl, false);
		fail("Did not throw expected Exception");
	} catch (IllegalStateException expected) {
		assertTrue(expected.getMessage().contains("Max parallelism mismatch"));
	}
}
 
Example #10
Source File: CheckpointCoordinatorTest.java    From Flink-CEPplus with Apache License 2.0
public static void verifyStateRestore(
		JobVertexID jobVertexID, ExecutionJobVertex executionJobVertex,
		List<KeyGroupRange> keyGroupPartitions) throws Exception {

	for (int i = 0; i < executionJobVertex.getParallelism(); i++) {

		JobManagerTaskRestore taskRestore = executionJobVertex.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
		Assert.assertEquals(1L, taskRestore.getRestoreCheckpointId());
		TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();

		OperatorSubtaskState operatorState = stateSnapshot.getSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID));

		ChainedStateHandle<OperatorStateHandle> expectedOpStateBackend =
				generateChainedPartitionableStateHandle(jobVertexID, i, 2, 8, false);

		assertTrue(CommonTestUtils.isStreamContentEqual(
				expectedOpStateBackend.get(0).openInputStream(),
				operatorState.getManagedOperatorState().iterator().next().openInputStream()));

		KeyGroupsStateHandle expectPartitionedKeyGroupState = generateKeyGroupState(
				jobVertexID, keyGroupPartitions.get(i), false);
		compareKeyedState(Collections.singletonList(expectPartitionedKeyGroupState), operatorState.getManagedKeyedState());
	}
}
 
Example #11
Source File: StateAssignmentOperationTest.java    From flink with Apache License 2.0
/**
 * Check that channel and operator states are assigned to the same tasks on recovery.
 */
@Test
public void testChannelStateAssignmentStability() throws JobException, JobExecutionException {
	int numOperators = 10; // note: each operator is placed into a separate vertex
	int numSubTasks = 100;

	Set<OperatorID> operatorIds = buildOperatorIds(numOperators);
	Map<OperatorID, OperatorState> states = buildOperatorStates(operatorIds, numSubTasks);
	Map<OperatorID, ExecutionJobVertex> vertices = buildVertices(operatorIds, numSubTasks);

	new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false).assignStates();

	for (OperatorID operatorId : operatorIds) {
		for (int subtaskIdx = 0; subtaskIdx < numSubTasks; subtaskIdx++) {
			Assert.assertEquals(
				states.get(operatorId).getState(subtaskIdx),
				getAssignedState(vertices.get(operatorId), operatorId, subtaskIdx));
		}
	}
}
 
Example #12
Source File: CheckpointMetadataLoadingTest.java    From flink with Apache License 2.0
private static Map<JobVertexID, ExecutionJobVertex> createTasks(OperatorID operatorId, int parallelism, int maxParallelism) {
	final JobVertexID vertexId = new JobVertexID(operatorId.getLowerPart(), operatorId.getUpperPart());

	ExecutionJobVertex vertex = mock(ExecutionJobVertex.class);
	when(vertex.getParallelism()).thenReturn(parallelism);
	when(vertex.getMaxParallelism()).thenReturn(maxParallelism);
	when(vertex.getOperatorIDs()).thenReturn(Collections.singletonList(OperatorIDPair.generatedIDOnly(operatorId)));

	if (parallelism != maxParallelism) {
		when(vertex.isMaxParallelismConfigured()).thenReturn(true);
	}

	Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>();
	tasks.put(vertexId, vertex);

	return tasks;
}
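
Example #9 above consumes this helper; a minimal sketch of the mismatch it sets up (numbers illustrative):

// parallelism != maxParallelism, so isMaxParallelismConfigured() is stubbed to true
// and a restored savepoint's max parallelism must then match exactly.
Map<JobVertexID, ExecutionJobVertex> tasks = createTasks(new OperatorID(), 4, 8);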
 
Example #13
Source File: DefaultFailoverTopology.java    From flink with Apache License 2.0
public DefaultFailoverTopology(ExecutionGraph executionGraph) {
	checkNotNull(executionGraph);

	this.containsCoLocationConstraints = executionGraph.getAllVertices().values().stream()
		.map(ExecutionJobVertex::getCoLocationGroup)
		.anyMatch(Objects::nonNull);

	// generate vertices
	this.failoverVertices = new ArrayList<>();
	final Map<ExecutionVertex, DefaultFailoverVertex> failoverVertexMap = new IdentityHashMap<>();
	for (ExecutionVertex vertex : executionGraph.getAllExecutionVertices()) {
		final DefaultFailoverVertex failoverVertex = new DefaultFailoverVertex(
			new ExecutionVertexID(vertex.getJobvertexId(), vertex.getParallelSubtaskIndex()),
			vertex.getTaskNameWithSubtaskIndex());
		this.failoverVertices.add(failoverVertex);
		failoverVertexMap.put(vertex, failoverVertex);
	}

	// generate edges
	connectVerticesWithEdges(failoverVertexMap);
}
 
Example #14
Source File: FailoverRegion.java    From flink with Apache License 2.0
public FailoverRegion(
	ExecutionGraph executionGraph,
	List<ExecutionVertex> connectedExecutions,
	Map<JobVertexID, ExecutionJobVertex> tasks) {

	this.executionGraph = checkNotNull(executionGraph);
	this.connectedExecutionVertexes = checkNotNull(connectedExecutions);
	this.tasks = checkNotNull(tasks);

	LOG.debug("Created failover region {} with vertices: {}", id, connectedExecutions);
}
 
Example #15
Source File: BackPressureStatsTrackerImplTest.java    From flink with Apache License 2.0
private static ExecutionJobVertex createExecutionJobVertex() {
	try {
		return ExecutionJobVertexTest.createExecutionJobVertex(4, 4);
	} catch (Exception e) {
		throw new RuntimeException("Failed to create ExecutionJobVertex.");
	}
}
 
Example #16
Source File: CheckpointCoordinatorTestingUtils.java    From flink with Apache License 2.0
static ExecutionJobVertex mockExecutionJobVertex(
	JobVertexID jobVertexID,
	int parallelism,
	int maxParallelism) throws Exception {

	return mockExecutionJobVertex(
		jobVertexID,
		Collections.singletonList(OperatorID.fromJobVertexID(jobVertexID)),
		parallelism,
		maxParallelism
	);
}
 
Example #17
Source File: CheckpointStatsTrackerTest.java    From flink with Apache License 2.0
/**
 * Creates a "disabled" checkpoint tracker for tests.
 */
static CheckpointStatsTracker createTestTracker() {
	ExecutionJobVertex jobVertex = mock(ExecutionJobVertex.class);
	when(jobVertex.getJobVertexId()).thenReturn(new JobVertexID());
	when(jobVertex.getParallelism()).thenReturn(1);

	return new CheckpointStatsTracker(
		0,
		Collections.singletonList(jobVertex),
		mock(CheckpointCoordinatorConfiguration.class),
		new UnregisteredMetricsGroup());
}
 
Example #18
Source File: RestartPipelinedRegionStrategy.java    From flink with Apache License 2.0
@VisibleForTesting
protected Map<JobVertexID, ExecutionJobVertex> initTasks(List<ExecutionVertex> connectedExecutions) {
	Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>(connectedExecutions.size());
	for (ExecutionVertex executionVertex : connectedExecutions) {
		JobVertexID jobvertexId = executionVertex.getJobvertexId();
		ExecutionJobVertex jobVertex = executionVertex.getJobVertex();
		tasks.putIfAbsent(jobvertexId, jobVertex);
	}
	return tasks;
}
 
Example #19
Source File: AdaptedRestartPipelinedRegionStrategyNG.java    From flink with Apache License 2.0
@Override
public void notifyNewVertices(final List<ExecutionJobVertex> newJobVerticesTopological) {
	// build the underlying new generation failover strategy when the executionGraph vertices are all added,
	// otherwise the failover topology will not be correctly built.
	// currently it's safe to add it here, as this method is invoked only once in production code.
	checkState(restartPipelinedRegionStrategy == null, "notifyNewVertices() must be called only once");
	this.restartPipelinedRegionStrategy = new RestartPipelinedRegionStrategy(
		new DefaultFailoverTopology(executionGraph), executionGraph.getResultPartitionAvailabilityChecker());
}
 
Example #20
Source File: RestartIndividualStrategy.java    From flink with Apache License 2.0
@Override
public void notifyNewVertices(List<ExecutionJobVertex> newJobVerticesTopological) {
	// we validate here that the vertices are in fact not connected to
	// any other vertices
	for (ExecutionJobVertex ejv : newJobVerticesTopological) {
		List<IntermediateResult> inputs = ejv.getInputs();
		IntermediateResult[] outputs = ejv.getProducedDataSets();

		if ((inputs != null && inputs.size() > 0) || (outputs != null && outputs.length > 0)) {
			throw new FlinkRuntimeException("Incompatible failover strategy - strategy '" + 
					getStrategyName() + "' can only handle jobs with only disconnected tasks.");
		}
	}
}
 
Example #21
Source File: StateAssignmentOperation.java    From flink with Apache License 2.0
/**
 * Verifies conditions regarding parallelism and maxParallelism that must be met when restoring state.
 *
 * @param operatorState      state to restore
 * @param executionJobVertex task for which the state should be restored
 */
private static void checkParallelismPreconditions(OperatorState operatorState, ExecutionJobVertex executionJobVertex) {
	//----------------------------------------max parallelism preconditions-------------------------------------

	if (operatorState.getMaxParallelism() < executionJobVertex.getParallelism()) {
		throw new IllegalStateException("The state for task " + executionJobVertex.getJobVertexId() +
			" can not be restored. The maximum parallelism (" + operatorState.getMaxParallelism() +
			") of the restored state is lower than the configured parallelism (" + executionJobVertex.getParallelism() +
			"). Please reduce the parallelism of the task to be lower or equal to the maximum parallelism."
		);
	}

	// check that the number of key groups has not changed, or whether we need to override it to satisfy the restored state
	if (operatorState.getMaxParallelism() != executionJobVertex.getMaxParallelism()) {

		if (!executionJobVertex.isMaxParallelismConfigured()) {
			// if the max parallelism was not explicitly specified by the user, we derive it from the state

			LOG.debug("Overriding maximum parallelism for JobVertex {} from {} to {}",
				executionJobVertex.getJobVertexId(), executionJobVertex.getMaxParallelism(), operatorState.getMaxParallelism());

			executionJobVertex.setMaxParallelism(operatorState.getMaxParallelism());
		} else {
			// if the max parallelism was explicitly specified, we complain on mismatch
			throw new IllegalStateException("The maximum parallelism (" +
				operatorState.getMaxParallelism() + ") with which the latest " +
				"checkpoint of the execution job vertex " + executionJobVertex +
				" has been taken and the current maximum parallelism (" +
				executionJobVertex.getMaxParallelism() + ") changed. This " +
				"is currently not supported.");
		}
	}
}
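
An illustration of the first precondition, with made-up numbers:

// State snapshotted at parallelism 64 with max parallelism 128 ...
OperatorState restored = new OperatorState(new OperatorID(), 64, 128);
// ... cannot be restored into a vertex rescaled to parallelism 256:
// restored.getMaxParallelism() (128) < getParallelism() (256) -> IllegalStateException.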
 
Example #22
Source File: CheckpointStatsTracker.java    From flink with Apache License 2.0
/**
 * Creates a new checkpoint stats tracker.
 *
 * @param numRememberedCheckpoints Maximum number of checkpoints to remember, including in progress ones.
 * @param jobVertices Job vertices involved in the checkpoints.
 * @param jobCheckpointingConfiguration Checkpointing configuration.
 * @param metricGroup Metric group for exposed metrics
 */
public CheckpointStatsTracker(
	int numRememberedCheckpoints,
	List<ExecutionJobVertex> jobVertices,
	CheckpointCoordinatorConfiguration jobCheckpointingConfiguration,
	MetricGroup metricGroup) {

	checkArgument(numRememberedCheckpoints >= 0, "Negative number of remembered checkpoints");
	this.history = new CheckpointStatsHistory(numRememberedCheckpoints);
	this.jobVertices = checkNotNull(jobVertices, "JobVertices");
	this.jobCheckpointingConfiguration = checkNotNull(jobCheckpointingConfiguration);

	// Compute the total subtask count. We do this here in order to only
	// do it once.
	int count = 0;
	for (ExecutionJobVertex vertex : jobVertices) {
		count += vertex.getParallelism();
	}
	this.totalSubtaskCount = count;

	// Latest snapshot is empty
	latestSnapshot = new CheckpointStatsSnapshot(
		counts.createSnapshot(),
		summary.createSnapshot(),
		history.createSnapshot(),
		null);

	// Register the metrics
	registerMetrics(metricGroup);
}
 
Example #23
Source File: CheckpointCoordinator.java    From flink with Apache License 2.0
/**
 * Restore the state with given savepoint.
 *
 * @param savepointPointer The pointer to the savepoint.
 * @param allowNonRestored True if allowing checkpoint state that cannot be
 *                         mapped to any job vertex in tasks.
 * @param tasks            Map of job vertices to restore. State for these
 *                         vertices is restored via
 *                         {@link Execution#setInitialState(JobManagerTaskRestore)}.
 * @param userClassLoader  The class loader to resolve serialized classes in
 *                         legacy savepoint versions.
 */
public boolean restoreSavepoint(
		String savepointPointer,
		boolean allowNonRestored,
		Map<JobVertexID, ExecutionJobVertex> tasks,
		ClassLoader userClassLoader) throws Exception {

	Preconditions.checkNotNull(savepointPointer, "The savepoint path cannot be null.");

	LOG.info("Starting job {} from savepoint {} ({})",
			job, savepointPointer, (allowNonRestored ? "allowing non restored state" : ""));

	final CompletedCheckpointStorageLocation checkpointLocation = checkpointStorage.resolveCheckpoint(savepointPointer);

	// Load the savepoint as a checkpoint into the system
	CompletedCheckpoint savepoint = Checkpoints.loadAndValidateCheckpoint(
			job, tasks, checkpointLocation, userClassLoader, allowNonRestored);

	completedCheckpointStore.addCheckpoint(savepoint);

	// Reset the checkpoint ID counter
	long nextCheckpointId = savepoint.getCheckpointID() + 1;
	checkpointIdCounter.setCount(nextCheckpointId);

	LOG.info("Reset the checkpoint ID of job {} to {}.", job, nextCheckpointId);

	return restoreLatestCheckpointedState(tasks, true, allowNonRestored);
}
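
A hedged call-site sketch (the savepoint pointer and surrounding objects are illustrative, not from the source):

// e.g. during job submission with a configured savepoint restore path
boolean restored = checkpointCoordinator.restoreSavepoint(
		"hdfs:///savepoints/savepoint-abc123",   // illustrative pointer
		false,                                   // fail on non-restored state
		executionGraph.getAllVertices(),         // Map<JobVertexID, ExecutionJobVertex>
		userCodeClassLoader);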
 
Example #24
Source File: KvStateLocationRegistryTest.java    From flink with Apache License 2.0
/**
 * Tests that registrations with duplicate names throw an Exception.
 */
@Test
public void testRegisterDuplicateName() throws Exception {
	ExecutionJobVertex[] vertices = new ExecutionJobVertex[] {
			createJobVertex(32),
			createJobVertex(13) };

	Map<JobVertexID, ExecutionJobVertex> vertexMap = createVertexMap(vertices);

	String registrationName = "duplicated-name";
	KvStateLocationRegistry registry = new KvStateLocationRegistry(new JobID(), vertexMap);

	// First operator registers
	registry.notifyKvStateRegistered(
			vertices[0].getJobVertexId(),
			new KeyGroupRange(0, 0),
			registrationName,
			new KvStateID(),
			new InetSocketAddress(InetAddress.getLocalHost(), 12328));

	try {
		// Second operator registers same name
		registry.notifyKvStateRegistered(
				vertices[1].getJobVertexId(),
				new KeyGroupRange(0, 0),
				registrationName,
				new KvStateID(),
				new InetSocketAddress(InetAddress.getLocalHost(), 12032));

		fail("Did not throw expected Exception after duplicated name");
	} catch (IllegalStateException ignored) {
		// Expected
	}
}
 
Example #25
Source File: KvStateLocationRegistryTest.java    From flink with Apache License 2.0
private ExecutionJobVertex createJobVertex(int maxParallelism) {
	JobVertexID id = new JobVertexID();
	ExecutionJobVertex vertex = mock(ExecutionJobVertex.class);

	when(vertex.getJobVertexId()).thenReturn(id);
	when(vertex.getMaxParallelism()).thenReturn(maxParallelism);

	return vertex;
}
 
Example #26
Source File: SchedulerTestUtils.java    From Flink-CEPplus with Apache License 2.0
public static Execution getDummyTask() {
	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);

	ExecutionVertex vertex = mock(ExecutionVertex.class);
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.toString()).thenReturn("TEST-VERTEX");
	when(vertex.getJobVertex()).thenReturn(executionJobVertex);
	when(vertex.getJobvertexId()).thenReturn(new JobVertexID());

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);
	
	return execution;
}
 
Example #27
Source File: SchedulerTestUtils.java    From flink with Apache License 2.0
public static Execution getDummyTask() {
	ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);

	ExecutionVertex vertex = mock(ExecutionVertex.class);
	when(vertex.getJobId()).thenReturn(new JobID());
	when(vertex.toString()).thenReturn("TEST-VERTEX");
	when(vertex.getJobVertex()).thenReturn(executionJobVertex);
	when(vertex.getJobvertexId()).thenReturn(new JobVertexID());

	Execution execution = mock(Execution.class);
	when(execution.getVertex()).thenReturn(vertex);
	
	return execution;
}
 
Example #28
Source File: KvStateLocationRegistryTest.java    From Flink-CEPplus with Apache License 2.0
private ExecutionJobVertex createJobVertex(int maxParallelism) {
	JobVertexID id = new JobVertexID();
	ExecutionJobVertex vertex = mock(ExecutionJobVertex.class);

	when(vertex.getJobVertexId()).thenReturn(id);
	when(vertex.getMaxParallelism()).thenReturn(maxParallelism);

	return vertex;
}
 
Example #29
Source File: KvStateLocationRegistryTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests exception on unregistration before registration.
 */
@Test
public void testUnregisterBeforeRegister() throws Exception {
	ExecutionJobVertex vertex = createJobVertex(4);
	Map<JobVertexID, ExecutionJobVertex> vertexMap = createVertexMap(vertex);

	KvStateLocationRegistry registry = new KvStateLocationRegistry(new JobID(), vertexMap);
	try {
		registry.notifyKvStateUnregistered(vertex.getJobVertexId(), new KeyGroupRange(0, 0), "any-name");
		fail("Did not throw expected Exception, because of missing registration");
	} catch (IllegalArgumentException ignored) {
		// Expected
	}
}
 
Example #30
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0
private ExecutionJobVertex mockExecutionJobVertex(JobVertexID id, ExecutionVertex[] vertices) {
	ExecutionJobVertex vertex = mock(ExecutionJobVertex.class);
	when(vertex.getParallelism()).thenReturn(vertices.length);
	when(vertex.getMaxParallelism()).thenReturn(vertices.length);
	when(vertex.getJobVertexId()).thenReturn(id);
	when(vertex.getTaskVertices()).thenReturn(vertices);
	when(vertex.getOperatorIDs()).thenReturn(Collections.singletonList(OperatorID.fromJobVertexID(id)));
	when(vertex.getUserDefinedOperatorIDs()).thenReturn(Collections.<OperatorID>singletonList(null));

	for (ExecutionVertex v : vertices) {
		when(v.getJobVertex()).thenReturn(vertex);
	}
	return vertex;
}
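
A brief usage sketch pairing this helper with mocked execution vertices (assuming Mockito and JUnit, as above):

ExecutionVertex[] subtasks = new ExecutionVertex[] {
		mock(ExecutionVertex.class), mock(ExecutionVertex.class) };
ExecutionJobVertex ejv = mockExecutionJobVertex(new JobVertexID(), subtasks);

// parallelism and max parallelism both mirror the array length
assertEquals(2, ejv.getParallelism());
assertEquals(2, ejv.getMaxParallelism());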