org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph Java Examples

The following examples show how to use org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph. Each example is taken from an open-source project; the source file and license are noted above each snippet.
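Before the examples, here is a minimal usage sketch (not taken from the Flink code base) of the pattern most of them share: an ArchivedExecutionGraph is a serializable snapshot of a job's execution graph; in most of these examples it describes a job that has reached a globally terminal state, and callers either read its JobStatus or convert it into a JobResult for clients, as the Dispatcher does in Example #3. The class and method names below are illustrative assumptions; only the ArchivedExecutionGraph and JobResult calls mirror what the examples use.

import org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph;
import org.apache.flink.runtime.jobmaster.JobResult;

// Illustrative sketch only; this class is not part of Flink.
public final class ArchivedExecutionGraphUsageSketch {

	private ArchivedExecutionGraphUsageSketch() {
	}

	// Summarizes a finished job from its archived execution graph.
	static String summarize(ArchivedExecutionGraph archivedExecutionGraph) {
		// getState() returns the job's JobStatus, e.g. FINISHED, CANCELED or FAILED.
		return archivedExecutionGraph.getJobName()
			+ " (" + archivedExecutionGraph.getJobID() + ") ended in state "
			+ archivedExecutionGraph.getState();
	}

	// Converts the archived graph into a client-facing JobResult (see Example #3).
	// JobResult.createFrom expects the graph to be in a globally terminal state.
	static JobResult toJobResult(ArchivedExecutionGraph archivedExecutionGraph) {
		return JobResult.createFrom(archivedExecutionGraph);
	}
}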
Example #1
Source File: Dispatcher.java    From flink with Apache License 2.0
private void archiveExecutionGraph(ArchivedExecutionGraph archivedExecutionGraph) {
	try {
		archivedExecutionGraphStore.put(archivedExecutionGraph);
	} catch (IOException e) {
		log.info(
			"Could not store completed job {}({}).",
			archivedExecutionGraph.getJobName(),
			archivedExecutionGraph.getJobID(),
			e);
	}

	final CompletableFuture<Acknowledge> executionGraphFuture = historyServerArchivist.archiveExecutionGraph(archivedExecutionGraph);

	executionGraphFuture.whenComplete(
		(Acknowledge ignored, Throwable throwable) -> {
			if (throwable != null) {
				log.info(
					"Could not archive completed job {}({}) to the history server.",
					archivedExecutionGraph.getJobName(),
					archivedExecutionGraph.getJobID(),
					throwable);
			}
		});
}
 
Example #2
Source File: JobManagerRunnerTest.java    From flink with Apache License 2.0
@Test
public void testShutDown() throws Exception {
	final JobManagerRunner jobManagerRunner = createJobManagerRunner();

	try {
		jobManagerRunner.start();

		final CompletableFuture<ArchivedExecutionGraph> resultFuture = jobManagerRunner.getResultFuture();

		assertThat(resultFuture.isDone(), is(false));

		jobManagerRunner.closeAsync();

		try {
			resultFuture.get();
			fail("Should have failed.");
		} catch (ExecutionException ee) {
			assertThat(ExceptionUtils.stripExecutionException(ee), instanceOf(JobNotFinishedException.class));
		}
	} finally {
		jobManagerRunner.close();
	}
}
 
Example #3
Source File: Dispatcher.java    From flink with Apache License 2.0
@Override
public CompletableFuture<JobResult> requestJobResult(JobID jobId, Time timeout) {
	final CompletableFuture<JobManagerRunner> jobManagerRunnerFuture = jobManagerRunnerFutures.get(jobId);

	if (jobManagerRunnerFuture == null) {
		final ArchivedExecutionGraph archivedExecutionGraph = archivedExecutionGraphStore.get(jobId);

		if (archivedExecutionGraph == null) {
			return FutureUtils.completedExceptionally(new FlinkJobNotFoundException(jobId));
		} else {
			return CompletableFuture.completedFuture(JobResult.createFrom(archivedExecutionGraph));
		}
	} else {
		return jobManagerRunnerFuture.thenCompose(JobManagerRunner::getResultFuture).thenApply(JobResult::createFrom);
	}
}
 
Example #4
Source File: ArchivedExecutionGraphBuilder.java    From flink with Apache License 2.0
public ArchivedExecutionGraph build() {
	JobID jobID = this.jobID != null ? this.jobID : new JobID();
	String jobName = this.jobName != null ? this.jobName : "job_" + RANDOM.nextInt();

	if (tasks == null) {
		tasks = Collections.emptyMap();
	}

	return new ArchivedExecutionGraph(
		jobID,
		jobName,
		tasks,
		verticesInCreationOrder != null ? verticesInCreationOrder : new ArrayList<>(tasks.values()),
		stateTimestamps != null ? stateTimestamps : new long[JobStatus.values().length],
		state != null ? state : JobStatus.FINISHED,
		failureCause,
		jsonPlan != null ? jsonPlan : "{\"jobid\":\"" + jobID + "\", \"name\":\"" + jobName + "\", \"nodes\":[]}",
		archivedUserAccumulators != null ? archivedUserAccumulators : new StringifiedAccumulatorResult[0],
		serializedUserAccumulators != null ? serializedUserAccumulators : Collections.emptyMap(),
		archivedExecutionConfig != null ? archivedExecutionConfig : new ArchivedExecutionConfigBuilder().build(),
		isStoppable,
		null,
		null,
		"stateBackendName"
	);
}
 
Example #5
Source File: JobManagerRunnerTest.java    From flink with Apache License 2.0
@Test
public void testJobCompletion() throws Exception {
	final JobManagerRunner jobManagerRunner = createJobManagerRunner();

	try {
		jobManagerRunner.start();

		final CompletableFuture<ArchivedExecutionGraph> resultFuture = jobManagerRunner.getResultFuture();

		assertThat(resultFuture.isDone(), is(false));

		jobManagerRunner.jobReachedGloballyTerminalState(archivedExecutionGraph);

		assertThat(resultFuture.get(), is(archivedExecutionGraph));
	} finally {
		jobManagerRunner.close();
	}
}
 
Example #6
Source File: DispatcherResourceCleanupTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testHABlobsAreNotRemovedIfHAJobGraphRemovalFails() throws Exception {
	submittedJobGraphStore.setRemovalFailure(new Exception("Failed to Remove future"));
	submitJob();

	ArchivedExecutionGraph executionGraph = new ArchivedExecutionGraphBuilder()
		.setJobID(jobId)
		.setState(JobStatus.CANCELED)
		.build();

	resultFuture.complete(executionGraph);
	terminationFuture.complete(null);

	assertThat(cleanupJobFuture.get(), equalTo(jobId));
	assertThat(deleteAllHABlobsFuture.isDone(), is(false));
}
 
Example #7
Source File: FileArchivedExecutionGraphStoreTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that we can put {@link ArchivedExecutionGraph} into the
 * {@link FileArchivedExecutionGraphStore} and that the graph is persisted.
 */
@Test
public void testPut() throws IOException {
	final ArchivedExecutionGraph dummyExecutionGraph = new ArchivedExecutionGraphBuilder().setState(JobStatus.FINISHED).build();
	final File rootDir = temporaryFolder.newFolder();

	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(rootDir)) {

		final File storageDirectory = executionGraphStore.getStorageDir();

		// check that the storage directory is empty
		assertThat(storageDirectory.listFiles().length, Matchers.equalTo(0));

		executionGraphStore.put(dummyExecutionGraph);

		// check that we have persisted the given execution graph
		assertThat(storageDirectory.listFiles().length, Matchers.equalTo(1));

		assertThat(executionGraphStore.get(dummyExecutionGraph.getJobID()), new PartialArchivedExecutionGraphMatcher(dummyExecutionGraph));
	}
}
 
Example #8
Source File: FileArchivedExecutionGraphStoreTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that we obtain the correct jobs overview.
 */
@Test
public void testStoredJobsOverview() throws IOException {
	final int numberExecutionGraphs = 10;
	final Collection<ArchivedExecutionGraph> executionGraphs = generateTerminalExecutionGraphs(numberExecutionGraphs);

	final List<JobStatus> jobStatuses = executionGraphs.stream().map(ArchivedExecutionGraph::getState).collect(Collectors.toList());

	final JobsOverview expectedJobsOverview = JobsOverview.create(jobStatuses);

	final File rootDir = temporaryFolder.newFolder();

	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(rootDir)) {
		for (ArchivedExecutionGraph executionGraph : executionGraphs) {
			executionGraphStore.put(executionGraph);
		}

		assertThat(executionGraphStore.getStoredJobsOverview(), Matchers.equalTo(expectedJobsOverview));
	}
}
 
Example #9
Source File: FileArchivedExecutionGraphStoreTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that we obtain the correct collection of available job details.
 */
@Test
public void testAvailableJobDetails() throws IOException {
	final int numberExecutionGraphs = 10;
	final Collection<ArchivedExecutionGraph> executionGraphs = generateTerminalExecutionGraphs(numberExecutionGraphs);

	final Collection<JobDetails> jobDetails = executionGraphs.stream().map(WebMonitorUtils::createDetailsForJob).collect(Collectors.toList());

	final File rootDir = temporaryFolder.newFolder();

	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(rootDir)) {
		for (ArchivedExecutionGraph executionGraph : executionGraphs) {
			executionGraphStore.put(executionGraph);
		}

		assertThat(executionGraphStore.getAvailableJobDetails(), Matchers.containsInAnyOrder(jobDetails.toArray()));
	}
}
 
Example #10
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0
/**
 * Tests that we obtain the correct jobs overview.
 */
@Test
public void testStoredJobsOverview() throws IOException {
	final int numberExecutionGraphs = 10;
	final Collection<ArchivedExecutionGraph> executionGraphs = generateTerminalExecutionGraphs(numberExecutionGraphs);

	final List<JobStatus> jobStatuses = executionGraphs.stream().map(ArchivedExecutionGraph::getState).collect(Collectors.toList());

	final JobsOverview expectedJobsOverview = JobsOverview.create(jobStatuses);

	final File rootDir = temporaryFolder.newFolder();

	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(rootDir)) {
		for (ArchivedExecutionGraph executionGraph : executionGraphs) {
			executionGraphStore.put(executionGraph);
		}

		assertThat(executionGraphStore.getStoredJobsOverview(), Matchers.equalTo(expectedJobsOverview));
	}
}
 
Example #11
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0
@Override
public boolean matches(Object o) {
	if (archivedExecutionGraph == o) {
		return true;
	}
	if (o == null || archivedExecutionGraph.getClass() != o.getClass()) {
		return false;
	}
	ArchivedExecutionGraph that = (ArchivedExecutionGraph) o;
	return archivedExecutionGraph.isStoppable() == that.isStoppable() &&
		Objects.equals(archivedExecutionGraph.getJobID(), that.getJobID()) &&
		Objects.equals(archivedExecutionGraph.getJobName(), that.getJobName()) &&
		archivedExecutionGraph.getState() == that.getState() &&
		Objects.equals(archivedExecutionGraph.getJsonPlan(), that.getJsonPlan()) &&
		Objects.equals(archivedExecutionGraph.getAccumulatorsSerialized(), that.getAccumulatorsSerialized()) &&
		Objects.equals(archivedExecutionGraph.getCheckpointCoordinatorConfiguration(), that.getCheckpointCoordinatorConfiguration()) &&
		archivedExecutionGraph.getAllVertices().size() == that.getAllVertices().size();
}
 
Example #12
Source File: JobManagerRunnerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testJobCompletion() throws Exception {
	final JobManagerRunner jobManagerRunner = createJobManagerRunner();

	try {
		jobManagerRunner.start();

		final CompletableFuture<ArchivedExecutionGraph> resultFuture = jobManagerRunner.getResultFuture();

		assertThat(resultFuture.isDone(), is(false));

		jobManagerRunner.jobReachedGloballyTerminalState(archivedExecutionGraph);

		assertThat(resultFuture.get(), is(archivedExecutionGraph));
	} finally {
		jobManagerRunner.close();
	}
}
 
Example #13
Source File: JobManagerRunnerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testJobFinishedByOther() throws Exception {
	final JobManagerRunner jobManagerRunner = createJobManagerRunner();

	try {
		jobManagerRunner.start();

		final CompletableFuture<ArchivedExecutionGraph> resultFuture = jobManagerRunner.getResultFuture();

		assertThat(resultFuture.isDone(), is(false));

		jobManagerRunner.jobFinishedByOther();

		try {
			resultFuture.get();
			fail("Should have failed.");
		} catch (ExecutionException ee) {
			assertThat(ExceptionUtils.stripExecutionException(ee), instanceOf(JobNotFinishedException.class));
		}
	} finally {
		jobManagerRunner.close();
	}
}
 
Example #14
Source File: ArchivedExecutionGraphBuilder.java    From Flink-CEPplus with Apache License 2.0
public ArchivedExecutionGraph build() {
	JobID jobID = this.jobID != null ? this.jobID : new JobID();
	String jobName = this.jobName != null ? this.jobName : "job_" + RANDOM.nextInt();

	if (tasks == null) {
		tasks = Collections.emptyMap();
	}

	return new ArchivedExecutionGraph(
		jobID,
		jobName,
		tasks,
		verticesInCreationOrder != null ? verticesInCreationOrder : new ArrayList<>(tasks.values()),
		stateTimestamps != null ? stateTimestamps : new long[JobStatus.values().length],
		state != null ? state : JobStatus.FINISHED,
		failureCause,
		jsonPlan != null ? jsonPlan : "{\"jobid\":\"" + jobID + "\", \"name\":\"" + jobName + "\", \"nodes\":[]}",
		archivedUserAccumulators != null ? archivedUserAccumulators : new StringifiedAccumulatorResult[0],
		serializedUserAccumulators != null ? serializedUserAccumulators : Collections.emptyMap(),
		archivedExecutionConfig != null ? archivedExecutionConfig : new ArchivedExecutionConfigBuilder().build(),
		isStoppable,
		null,
		null
	);
}
 
Example #15
Source File: JobMasterTest.java    From flink with Apache License 2.0
private static ArchivedExecutionGraph requestExecutionGraph(final JobMasterGateway jobMasterGateway) {
	try {
		return jobMasterGateway.requestJob(testingTimeout).get();
	} catch (InterruptedException | ExecutionException e) {
		throw new RuntimeException(e);
	}
}
 
Example #16
Source File: JobMasterTest.java    From flink with Apache License 2.0
private static Collection<AccessExecution> getExecutions(final JobMasterGateway jobMasterGateway) {
	final ArchivedExecutionGraph archivedExecutionGraph = requestExecutionGraph(jobMasterGateway);

	return archivedExecutionGraph.getAllVertices().values()
		.stream()
		.flatMap(vertex -> Arrays.stream(vertex.getTaskVertices()))
		.map(AccessExecutionVertex::getCurrentExecutionAttempt)
		.collect(Collectors.toList());
}
 
Example #17
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0
@Override
public void put(ArchivedExecutionGraph archivedExecutionGraph) throws IOException {
	final JobStatus jobStatus = archivedExecutionGraph.getState();
	final JobID jobId = archivedExecutionGraph.getJobID();
	final String jobName = archivedExecutionGraph.getJobName();

	Preconditions.checkArgument(
		jobStatus.isGloballyTerminalState(),
		"The job " + jobName + '(' + jobId +
			") is not in a globally terminal state. Instead it is in state " + jobStatus + '.');

	switch (jobStatus) {
		case FINISHED:
			numFinishedJobs++;
			break;
		case CANCELED:
			numCanceledJobs++;
			break;
		case FAILED:
			numFailedJobs++;
			break;
		default:
			throw new IllegalStateException("The job " + jobName + '(' +
				jobId + ") should have been in a globally terminal state. " +
				"Instead it was in state " + jobStatus + '.');
	}

	// write the ArchivedExecutionGraph to disk
	storeArchivedExecutionGraph(archivedExecutionGraph);

	final JobDetails detailsForJob = WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);

	jobDetailsCache.put(jobId, detailsForJob);
	archivedExecutionGraphCache.put(jobId, archivedExecutionGraph);
}
 
Example #18
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0
@Override
@Nullable
public ArchivedExecutionGraph get(JobID jobId) {
	try {
		return archivedExecutionGraphCache.get(jobId);
	} catch (ExecutionException e) {
		LOG.debug("Could not load archived execution graph for job id {}.", jobId, e);
		return null;
	}
}
 
Example #19
Source File: JobMaster.java    From flink with Apache License 2.0
private void jobStatusChanged(
		final JobStatus newJobStatus,
		long timestamp,
		@Nullable final Throwable error) {
	validateRunsInMainThread();

	if (newJobStatus.isGloballyTerminalState()) {
		runAsync(() -> registeredTaskManagers.keySet()
			.forEach(partitionTracker::stopTrackingAndReleasePartitionsFor));

		final ArchivedExecutionGraph archivedExecutionGraph = schedulerNG.requestJob();
		scheduledExecutorService.execute(() -> jobCompletionActions.jobReachedGloballyTerminalState(archivedExecutionGraph));
	}
}
 
Example #20
Source File: Dispatcher.java    From flink with Apache License 2.0
protected void jobReachedGloballyTerminalState(ArchivedExecutionGraph archivedExecutionGraph) {
	Preconditions.checkArgument(
		archivedExecutionGraph.getState().isGloballyTerminalState(),
		"Job %s is in state %s which is not globally terminal.",
		archivedExecutionGraph.getJobID(),
		archivedExecutionGraph.getState());

	log.info("Job {} reached globally terminal state {}.", archivedExecutionGraph.getJobID(), archivedExecutionGraph.getState());

	archiveExecutionGraph(archivedExecutionGraph);

	final JobID jobId = archivedExecutionGraph.getJobID();

	removeJobAndRegisterTerminationFuture(jobId, true);
}
 
Example #21
Source File: MiniDispatcher.java    From Flink-CEPplus with Apache License 2.0
@Override
protected void jobReachedGloballyTerminalState(ArchivedExecutionGraph archivedExecutionGraph) {
	super.jobReachedGloballyTerminalState(archivedExecutionGraph);

	if (executionMode == ClusterEntrypoint.ExecutionMode.DETACHED) {
		// shut down since we don't have to wait for the execution result retrieval
		jobTerminationFuture.complete(ApplicationStatus.fromJobStatus(archivedExecutionGraph.getState()));
	}
}
 
Example #22
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0
@Nullable
@Override
public JobDetails getAvailableJobDetails(JobID jobId) {
	final ArchivedExecutionGraph archivedExecutionGraph = serializableExecutionGraphs.get(jobId);

	if (archivedExecutionGraph != null) {
		return WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);
	} else {
		return null;
	}
}
 
Example #23
Source File: FileArchivedExecutionGraphStoreTest.java    From Flink-CEPplus with Apache License 2.0
private Collection<ArchivedExecutionGraph> generateTerminalExecutionGraphs(int number) {
	final Collection<ArchivedExecutionGraph> executionGraphs = new ArrayList<>(number);

	for (int i = 0; i < number; i++) {
		final JobStatus state = GLOBALLY_TERMINAL_JOB_STATUS.get(ThreadLocalRandom.current().nextInt(GLOBALLY_TERMINAL_JOB_STATUS.size()));
		executionGraphs.add(
			new ArchivedExecutionGraphBuilder()
				.setState(state)
				.build());
	}

	return executionGraphs;
}
 
Example #24
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0
private Collection<ArchivedExecutionGraph> generateTerminalExecutionGraphs(int number) {
	final Collection<ArchivedExecutionGraph> executionGraphs = new ArrayList<>(number);

	for (int i = 0; i < number; i++) {
		final JobStatus state = GLOBALLY_TERMINAL_JOB_STATUS.get(ThreadLocalRandom.current().nextInt(GLOBALLY_TERMINAL_JOB_STATUS.size()));
		executionGraphs.add(
			new ArchivedExecutionGraphBuilder()
				.setState(state)
				.build());
	}

	return executionGraphs;
}
 
Example #25
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0
private int calculateSize(JobID jobId, ArchivedExecutionGraph serializableExecutionGraph) {
	final File archivedExecutionGraphFile = getExecutionGraphFile(jobId);

	if (archivedExecutionGraphFile.exists()) {
		return Math.toIntExact(archivedExecutionGraphFile.length());
	} else {
		LOG.debug("Could not find archived execution graph file for {}. Estimating the size instead.", jobId);
		// Rough estimate: assume on the order of 1000 bytes per vertex and per serialized accumulator.
		return serializableExecutionGraph.getAllVertices().size() * 1000 +
			serializableExecutionGraph.getAccumulatorsSerialized().size() * 1000;
	}
}
 
Example #26
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0
private ArchivedExecutionGraph loadExecutionGraph(JobID jobId) throws IOException, ClassNotFoundException {
	final File archivedExecutionGraphFile = getExecutionGraphFile(jobId);

	if (archivedExecutionGraphFile.exists()) {
		try (FileInputStream fileInputStream = new FileInputStream(archivedExecutionGraphFile)) {
			return InstantiationUtil.deserializeObject(fileInputStream, getClass().getClassLoader());
		}
	} else {
		throw new FileNotFoundException("Could not find file for archived execution graph " + jobId +
			". This indicates that the file either has been deleted or never written.");
	}
}
 
Example #27
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0
private void storeArchivedExecutionGraph(ArchivedExecutionGraph archivedExecutionGraph) throws IOException {
	final File archivedExecutionGraphFile = getExecutionGraphFile(archivedExecutionGraph.getJobID());

	try (FileOutputStream fileOutputStream = new FileOutputStream(archivedExecutionGraphFile)) {
		InstantiationUtil.serializeObject(fileOutputStream, archivedExecutionGraph);
	}
}
 
Example #28
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0
@Override
public JobsOverview getStoredJobsOverview() {
	Collection<JobStatus> allJobStatus = serializableExecutionGraphs.values().stream()
		.map(ArchivedExecutionGraph::getState)
		.collect(Collectors.toList());

	return JobsOverview.create(allJobStatus);
}
 
Example #29
Source File: JobManagerRunnerImpl.java    From flink with Apache License 2.0
/**
 * Job completion notification triggered by JobManager.
 */
@Override
public void jobReachedGloballyTerminalState(ArchivedExecutionGraph executionGraph) {
	unregisterJobFromHighAvailability();
	// complete the result future with the terminal execution graph
	resultFuture.complete(executionGraph);
}