org.apache.flink.runtime.webmonitor.WebMonitorUtils Java Examples

The following examples show how to use org.apache.flink.runtime.webmonitor.WebMonitorUtils. You can vote up the examples you find useful or vote down those you don't, and follow the link above each example to view the original project or source file. Related API usage is listed in the sidebar.
Example #1
Source File: FileArchivedExecutionGraphStoreTest.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the store reports one matching details entry for every
 * archived graph that was put into it, regardless of order.
 */
@Test
public void testAvailableJobDetails() throws IOException {
	final int graphCount = 10;
	final Collection<ArchivedExecutionGraph> graphs = generateTerminalExecutionGraphs(graphCount);

	// Expected result: the REST-facing details derived from each graph.
	final Collection<JobDetails> expectedDetails = graphs
		.stream()
		.map(graph -> WebMonitorUtils.createDetailsForJob(graph))
		.collect(Collectors.toList());

	final File storageDirectory = temporaryFolder.newFolder();

	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(storageDirectory)) {
		for (final ArchivedExecutionGraph graph : graphs) {
			executionGraphStore.put(graph);
		}

		// Order of the returned details is unspecified; compare as a set.
		assertThat(executionGraphStore.getAvailableJobDetails(), Matchers.containsInAnyOrder(expectedDetails.toArray()));
	}
}
 
Example #2
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Tests that we obtain the correct collection of available job details.
 *
 * <p>Puts a set of terminal execution graphs into a file-backed store and
 * checks that {@code getAvailableJobDetails()} returns one matching
 * details entry per graph, in any order.
 */
@Test
public void testAvailableJobDetails() throws IOException {
	final int numberExecutionGraphs = 10;
	final Collection<ArchivedExecutionGraph> executionGraphs = generateTerminalExecutionGraphs(numberExecutionGraphs);

	// Expected result: the details derived from each archived graph.
	final Collection<JobDetails> jobDetails = executionGraphs.stream().map(WebMonitorUtils::createDetailsForJob).collect(Collectors.toList());

	final File rootDir = temporaryFolder.newFolder();

	// The store is closed by try-with-resources after the assertion runs.
	try (final FileArchivedExecutionGraphStore executionGraphStore = createDefaultExecutionGraphStore(rootDir)) {
		for (ArchivedExecutionGraph executionGraph : executionGraphs) {
			executionGraphStore.put(executionGraph);
		}

		// Order is unspecified, so compare as an unordered collection.
		assertThat(executionGraphStore.getAvailableJobDetails(), Matchers.containsInAnyOrder(jobDetails.toArray()));
	}
}
 
Example #3
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the details for the given job, or {@code null} when the job is
 * not present in the in-memory store.
 */
@Nullable
@Override
public JobDetails getAvailableJobDetails(JobID jobId) {
	final ArchivedExecutionGraph graph = serializableExecutionGraphs.get(jobId);
	// Unknown job ids map to null per the @Nullable contract.
	return graph == null ? null : WebMonitorUtils.createDetailsForJob(graph);
}
 
Example #4
Source File: MemoryArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
// Returns the details for the given job, or null when the job is unknown.
@Nullable
@Override
public JobDetails getAvailableJobDetails(JobID jobId) {
	// Look up the in-memory archived graph for the requested job.
	final ArchivedExecutionGraph archivedExecutionGraph = serializableExecutionGraphs.get(jobId);

	if (archivedExecutionGraph != null) {
		// Derive the lightweight REST-facing details from the full graph.
		return WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);
	} else {
		// Unknown job id: the @Nullable contract is to return null.
		return null;
	}
}
 
Example #5
Source File: FileArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Archives a globally terminal execution graph: updates the per-outcome
 * counters, persists the graph to disk, and populates both caches.
 *
 * @throws IOException if writing the graph to disk fails
 * @throws IllegalArgumentException if the job is not globally terminal
 */
@Override
public void put(ArchivedExecutionGraph archivedExecutionGraph) throws IOException {
	final JobStatus jobStatus = archivedExecutionGraph.getState();
	final JobID jobId = archivedExecutionGraph.getJobID();
	final String jobName = archivedExecutionGraph.getJobName();

	// Only globally terminal jobs may be archived.
	Preconditions.checkArgument(
		jobStatus.isGloballyTerminalState(),
		"The job " + jobName + '(' + jobId +
			") is not in a globally terminal state. Instead it is in state " + jobStatus + '.');

	// Track per-outcome counters; any other status slipping past the
	// precondition is a programming error.
	switch (jobStatus) {
		case FINISHED:
			numFinishedJobs++;
			break;
		case CANCELED:
			numCanceledJobs++;
			break;
		case FAILED:
			numFailedJobs++;
			break;
		default:
			throw new IllegalStateException("The job " + jobName + '(' +
				jobId + ") should have been in a globally terminal state. " +
				"Instead it was in state " + jobStatus + '.');
	}

	// write the ArchivedExecutionGraph to disk
	storeArchivedExecutionGraph(archivedExecutionGraph);

	final JobDetails detailsForJob = WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);

	// Expose the job through both caches only after it is persisted.
	jobDetailsCache.put(jobId, detailsForJob);
	archivedExecutionGraphCache.put(jobId, archivedExecutionGraph);
}
 
Example #6
Source File: HistoryServerArchivist.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a history server archivist: a JSON-writing one when an archive
 * directory is configured, otherwise the no-op instance.
 */
static HistoryServerArchivist createHistoryServerArchivist(Configuration configuration, JsonArchivist jsonArchivist, Executor ioExecutor) {
	final String archiveDir = configuration.getString(JobManagerOptions.ARCHIVE_DIR);

	if (archiveDir == null) {
		// No archive directory configured: archiving is disabled.
		return VoidHistoryServerArchivist.INSTANCE;
	}

	final Path normalizedArchivePath = WebMonitorUtils.validateAndNormalizeUri(new Path(archiveDir).toUri());
	return new JsonResponseHistoryServerArchivist(jsonArchivist, normalizedArchivePath, ioExecutor);
}
 
Example #7
Source File: JobsOverviewHandler.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Produces the archived JSON for the jobs-overview endpoint of the given
 * graph, keyed by the endpoint URL with the job id substituted in.
 */
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	// Same payload the live REST endpoint serves: a single-job details list.
	final ResponseBody json = new MultipleJobsDetails(
		Collections.singleton(WebMonitorUtils.createDetailsForJob(graph)));

	// Substitute the concrete job id into the endpoint's URL template.
	final String archivePath = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());

	return Collections.singletonList(new ArchivedJson(archivePath, json));
}
 
Example #8
Source File: JobsOverviewHandler.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
// Produces the archived JSON for the jobs-overview endpoint of this graph.
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	// Same payload the live REST endpoint serves: a single-job details list.
	ResponseBody json = new MultipleJobsDetails(Collections.singleton(WebMonitorUtils.createDetailsForJob(graph)));
	// Substitute the concrete job id into the endpoint's URL template.
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singletonList(new ArchivedJson(path, json));
}
 
Example #9
Source File: HistoryServerArchivist.java    From flink with Apache License 2.0 5 votes vote down vote up
// Creates a JSON-writing archivist when an archive directory is configured,
// otherwise falls back to the no-op instance.
static HistoryServerArchivist createHistoryServerArchivist(Configuration configuration, JsonArchivist jsonArchivist) {
	final String configuredArchivePath = configuration.getString(JobManagerOptions.ARCHIVE_DIR);

	if (configuredArchivePath != null) {
		// Validate and normalize the configured path before constructing the archivist.
		final Path archivePath = WebMonitorUtils.validateAndNormalizeUri(new Path(configuredArchivePath).toUri());

		return new JsonResponseHistoryServerArchivist(jsonArchivist, archivePath);
	} else {
		// No archive directory configured: archiving is disabled.
		return VoidHistoryServerArchivist.INSTANCE;
	}
}
 
Example #10
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0 5 votes vote down vote up
// Returns the details for the given job, or null when the job is unknown.
@Nullable
@Override
public JobDetails getAvailableJobDetails(JobID jobId) {
	// Look up the in-memory archived graph for the requested job.
	final ArchivedExecutionGraph archivedExecutionGraph = serializableExecutionGraphs.get(jobId);

	if (archivedExecutionGraph != null) {
		// Derive the lightweight REST-facing details from the full graph.
		return WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);
	} else {
		// Unknown job id: the @Nullable contract is to return null.
		return null;
	}
}
 
Example #11
Source File: FileArchivedExecutionGraphStore.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Archives a globally terminal execution graph: updates the per-outcome
 * counters, persists the graph to disk, and populates both caches.
 *
 * @throws IOException if writing the graph to disk fails
 * @throws IllegalArgumentException if the job is not globally terminal
 */
@Override
public void put(ArchivedExecutionGraph archivedExecutionGraph) throws IOException {
	final JobStatus jobStatus = archivedExecutionGraph.getState();
	final JobID jobId = archivedExecutionGraph.getJobID();
	final String jobName = archivedExecutionGraph.getJobName();

	// Only globally terminal jobs may be archived.
	Preconditions.checkArgument(
		jobStatus.isGloballyTerminalState(),
		"The job " + jobName + '(' + jobId +
			") is not in a globally terminal state. Instead it is in state " + jobStatus + '.');

	// Track per-outcome counters for the overview statistics.
	if (jobStatus == JobStatus.FINISHED) {
		numFinishedJobs++;
	} else if (jobStatus == JobStatus.CANCELED) {
		numCanceledJobs++;
	} else if (jobStatus == JobStatus.FAILED) {
		numFailedJobs++;
	} else {
		// Unreachable unless a new terminal status is added without handling.
		throw new IllegalStateException("The job " + jobName + '(' +
			jobId + ") should have been in a globally terminal state. " +
			"Instead it was in state " + jobStatus + '.');
	}

	// Persist the graph before exposing it through the caches.
	storeArchivedExecutionGraph(archivedExecutionGraph);

	final JobDetails detailsForJob = WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);

	jobDetailsCache.put(jobId, detailsForJob);
	archivedExecutionGraphCache.put(jobId, archivedExecutionGraph);
}
 
Example #12
Source File: HistoryServerArchivist.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a history server archivist: a JSON-writing one when an archive
 * directory is configured, otherwise the no-op instance.
 */
static HistoryServerArchivist createHistoryServerArchivist(Configuration configuration, JsonArchivist jsonArchivist) {
	final String archiveDir = configuration.getString(JobManagerOptions.ARCHIVE_DIR);

	if (archiveDir == null) {
		// No archive directory configured: archiving is disabled.
		return VoidHistoryServerArchivist.INSTANCE;
	}

	final Path normalizedArchivePath = WebMonitorUtils.validateAndNormalizeUri(new Path(archiveDir).toUri());
	return new JsonResponseHistoryServerArchivist(jsonArchivist, normalizedArchivePath);
}
 
Example #13
Source File: JobsOverviewHandler.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Produces the archived JSON for the jobs-overview endpoint of the given
 * graph, keyed by the endpoint URL with the job id substituted in.
 */
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	// Build the job-id-specific path from the endpoint's URL template.
	final String archivePath = getMessageHeaders()
		.getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());

	// Same payload the live REST endpoint serves: a single-job details list.
	final ResponseBody json = new MultipleJobsDetails(
		Collections.singleton(WebMonitorUtils.createDetailsForJob(graph)));

	return Collections.singletonList(new ArchivedJson(archivePath, json));
}
 
Example #14
Source File: FileArchivedExecutionGraphStore.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Archives a globally terminal execution graph: updates the per-outcome
 * counters, persists the graph to disk, and populates both caches.
 *
 * @throws IOException if writing the graph to disk fails
 * @throws IllegalArgumentException if the job is not globally terminal
 */
@Override
public void put(ArchivedExecutionGraph archivedExecutionGraph) throws IOException {
	final JobStatus jobStatus = archivedExecutionGraph.getState();
	final JobID jobId = archivedExecutionGraph.getJobID();
	final String jobName = archivedExecutionGraph.getJobName();

	// Only globally terminal jobs may be archived.
	Preconditions.checkArgument(
		jobStatus.isGloballyTerminalState(),
		"The job " + jobName + '(' + jobId +
			") is not in a globally terminal state. Instead it is in state " + jobStatus + '.');

	// Track per-outcome counters; any other status slipping past the
	// precondition is a programming error.
	switch (jobStatus) {
		case FINISHED:
			numFinishedJobs++;
			break;
		case CANCELED:
			numCanceledJobs++;
			break;
		case FAILED:
			numFailedJobs++;
			break;
		default:
			throw new IllegalStateException("The job " + jobName + '(' +
				jobId + ") should have been in a globally terminal state. " +
				"Instead it was in state " + jobStatus + '.');
	}

	// write the ArchivedExecutionGraph to disk
	storeArchivedExecutionGraph(archivedExecutionGraph);

	final JobDetails detailsForJob = WebMonitorUtils.createDetailsForJob(archivedExecutionGraph);

	// Expose the job through both caches only after it is persisted.
	jobDetailsCache.put(jobId, detailsForJob);
	archivedExecutionGraphCache.put(jobId, archivedExecutionGraph);
}
 
Example #15
Source File: DispatcherRestEndpoint.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Extends the base REST handlers with the Dispatcher-specific ones:
 * the optional web submission extension plus the job submit handler.
 */
@Override
protected List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> initializeHandlers(final CompletableFuture<String> localAddressFuture) {
	List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = super.initializeHandlers(localAddressFuture);

	// Add the Dispatcher specific handlers

	final Time timeout = restConfiguration.getTimeout();

	JobSubmitHandler jobSubmitHandler = new JobSubmitHandler(
		leaderRetriever,
		timeout,
		responseHeaders,
		executor,
		clusterConfiguration);

	// The web submission extension may be absent from the classpath
	// (see the log message below), so its load can fail with FlinkException.
	if (restConfiguration.isWebSubmitEnabled()) {
		try {
			webSubmissionExtension = WebMonitorUtils.loadWebSubmissionExtension(
				leaderRetriever,
				timeout,
				responseHeaders,
				localAddressFuture,
				uploadDir,
				executor,
				clusterConfiguration);

			// register extension handlers
			handlers.addAll(webSubmissionExtension.getHandlers());
		} catch (FlinkException e) {
			// A missing extension is an expected deployment variant:
			// log the full stack trace only at debug level.
			if (log.isDebugEnabled()) {
				log.debug("Failed to load web based job submission extension.", e);
			} else {
				log.info("Failed to load web based job submission extension. " +
					"Probable reason: flink-runtime-web is not in the classpath.");
			}
		}
	} else {
		log.info("Web-based job submission is not enabled.");
	}

	// Register the plain job submit handler after the extension handlers.
	handlers.add(Tuple2.of(jobSubmitHandler.getMessageHeaders(), jobSubmitHandler));

	return handlers;
}
 
Example #16
Source File: DispatcherRestEndpoint.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Extends the base REST handlers with the Dispatcher-specific ones:
 * the optional web submission extension plus the job submit handler.
 */
@Override
protected List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> initializeHandlers(final CompletableFuture<String> localAddressFuture) {
	List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = super.initializeHandlers(localAddressFuture);

	// Add the Dispatcher specific handlers

	final Time timeout = restConfiguration.getTimeout();

	JobSubmitHandler jobSubmitHandler = new JobSubmitHandler(
		leaderRetriever,
		timeout,
		responseHeaders,
		executor,
		clusterConfiguration);

	// Web submission is gated on the cluster configuration here; the
	// extension may be absent from the classpath (see log message below).
	if (clusterConfiguration.getBoolean(WebOptions.SUBMIT_ENABLE)) {
		try {
			webSubmissionExtension = WebMonitorUtils.loadWebSubmissionExtension(
				leaderRetriever,
				timeout,
				responseHeaders,
				localAddressFuture,
				uploadDir,
				executor,
				clusterConfiguration);

			// register extension handlers
			handlers.addAll(webSubmissionExtension.getHandlers());
		} catch (FlinkException e) {
			// A missing extension is an expected deployment variant:
			// log the full stack trace only at debug level.
			if (log.isDebugEnabled()) {
				log.debug("Failed to load web based job submission extension.", e);
			} else {
				log.info("Failed to load web based job submission extension. " +
					"Probable reason: flink-runtime-web is not in the classpath.");
			}
		}
	} else {
		log.info("Web-based job submission is not enabled.");
	}

	// Register the plain job submit handler after the extension handlers.
	handlers.add(Tuple2.of(jobSubmitHandler.getMessageHeaders(), jobSubmitHandler));

	return handlers;
}
 
Example #17
Source File: SchedulerBase.java    From flink with Apache License 2.0 4 votes vote down vote up
// Returns the REST-facing job details derived from the current execution
// graph. May only be called from the scheduler's main thread.
@Override
public JobDetails requestJobDetails() {
	mainThreadExecutor.assertRunningInMainThread();
	return WebMonitorUtils.createDetailsForJob(executionGraph);
}
 
Example #18
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0 4 votes vote down vote up
// Derives the REST-facing job details entry for every given archived graph.
private static Collection<JobDetails> generateJobDetails(Collection<ArchivedExecutionGraph> executionGraphs) {
	return executionGraphs
		.stream()
		.map(graph -> WebMonitorUtils.createDetailsForJob(graph))
		.collect(Collectors.toList());
}
 
Example #19
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0 4 votes vote down vote up
// Snapshots the details of every stored graph into a fresh list.
@Override
public Collection<JobDetails> getAvailableJobDetails() {
	return serializableExecutionGraphs
		.values()
		.stream()
		.map(graph -> WebMonitorUtils.createDetailsForJob(graph))
		.collect(Collectors.toList());
}
 
Example #20
Source File: LegacyScheduler.java    From flink with Apache License 2.0 4 votes vote down vote up
// Returns the REST-facing job details derived from the current execution
// graph. May only be called from the scheduler's main thread.
@Override
public JobDetails requestJobDetails() {
	mainThreadExecutor.assertRunningInMainThread();
	return WebMonitorUtils.createDetailsForJob(executionGraph);
}
 
Example #21
Source File: MemoryArchivedExecutionGraphStore.java    From flink with Apache License 2.0 4 votes vote down vote up
// Materializes the details of every stored graph into a new list.
@Override
public Collection<JobDetails> getAvailableJobDetails() {
	return serializableExecutionGraphs.values().stream()
		.map(WebMonitorUtils::createDetailsForJob)
		.collect(Collectors.toList());
}
 
Example #22
Source File: JobMaster.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
// Asynchronously computes the job details on the scheduled executor.
@Override
public CompletableFuture<JobDetails> requestJobDetails(Time timeout) {
	// Capture the current graph reference so the async task uses a stable
	// snapshot rather than re-reading the field later.
	final ExecutionGraph currentExecutionGraph = executionGraph;
	return CompletableFuture.supplyAsync(() -> WebMonitorUtils.createDetailsForJob(currentExecutionGraph), scheduledExecutorService);
}
 
Example #23
Source File: DispatcherRestEndpoint.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
/**
 * Extends the base REST handlers with the Dispatcher-specific ones:
 * the optional web submission extension plus the job submit handler.
 */
@Override
protected List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> initializeHandlers(final CompletableFuture<String> localAddressFuture) {
	List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = super.initializeHandlers(localAddressFuture);

	// Add the Dispatcher specific handlers

	final Time timeout = restConfiguration.getTimeout();

	JobSubmitHandler jobSubmitHandler = new JobSubmitHandler(
		leaderRetriever,
		timeout,
		responseHeaders,
		executor,
		clusterConfiguration);

	// Web submission is gated on the cluster configuration here; the
	// extension may be absent from the classpath (see log message below).
	if (clusterConfiguration.getBoolean(WebOptions.SUBMIT_ENABLE)) {
		try {
			webSubmissionExtension = WebMonitorUtils.loadWebSubmissionExtension(
				leaderRetriever,
				timeout,
				responseHeaders,
				localAddressFuture,
				uploadDir,
				executor,
				clusterConfiguration);

			// register extension handlers
			handlers.addAll(webSubmissionExtension.getHandlers());
		} catch (FlinkException e) {
			// A missing extension is an expected deployment variant:
			// log the full stack trace only at debug level.
			if (log.isDebugEnabled()) {
				log.debug("Failed to load web based job submission extension.", e);
			} else {
				log.info("Failed to load web based job submission extension. " +
					"Probable reason: flink-runtime-web is not in the classpath.");
			}
		}
	} else {
		log.info("Web-based job submission is not enabled.");
	}

	// Register the plain job submit handler after the extension handlers.
	handlers.add(Tuple2.of(jobSubmitHandler.getMessageHeaders(), jobSubmitHandler));

	return handlers;
}
 
Example #24
Source File: MemoryArchivedExecutionGraphStore.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
// Materializes the details of every stored graph into a new list.
@Override
public Collection<JobDetails> getAvailableJobDetails() {
	return serializableExecutionGraphs.values().stream()
		.map(WebMonitorUtils::createDetailsForJob)
		.collect(Collectors.toList());
}