org.apache.flink.runtime.executiongraph.AccessExecutionGraph Java Examples

The following examples show how to use org.apache.flink.runtime.executiongraph.AccessExecutionGraph. They are taken from open-source projects; the source file and license are noted above each example.
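
Before the handler-focused examples, here is a minimal sketch that walks an AccessExecutionGraph and prints a per-vertex summary. It uses only accessors that appear in the examples on this page (getJobID, getJobName, getState, getVerticesTopologically, getTaskVertices, getExecutionState); the class and method names are illustrative:

import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.AccessExecutionGraph;
import org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex;
import org.apache.flink.runtime.executiongraph.AccessExecutionVertex;

public class ExecutionGraphSummary {

	// Illustrative helper: prints one line per job vertex with its running-subtask count.
	public static void printSummary(AccessExecutionGraph graph) {
		System.out.println("Job " + graph.getJobID() + " (" + graph.getJobName() + ") is " + graph.getState());

		for (AccessExecutionJobVertex vertex : graph.getVerticesTopologically()) {
			int running = 0;
			for (AccessExecutionVertex subtask : vertex.getTaskVertices()) {
				if (subtask.getExecutionState() == ExecutionState.RUNNING) {
					running++;
				}
			}
			System.out.println(vertex.getName() + ": " + running + '/' + vertex.getTaskVertices().length + " subtasks running");
		}
	}
}
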
Example #1
Source File: CheckpointConfigHandler.java    From flink with Apache License 2.0
private static CheckpointConfigInfo createCheckpointConfigInfo(AccessExecutionGraph executionGraph) throws RestHandlerException {
	final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration = executionGraph.getCheckpointCoordinatorConfiguration();

	if (checkpointCoordinatorConfiguration == null) {
		throw new RestHandlerException(
			"Checkpointing is not enabled for this job (" + executionGraph.getJobID() + ").",
			HttpResponseStatus.NOT_FOUND);
	} else {
		CheckpointRetentionPolicy retentionPolicy = checkpointCoordinatorConfiguration.getCheckpointRetentionPolicy();

		CheckpointConfigInfo.ExternalizedCheckpointInfo externalizedCheckpointInfo = new CheckpointConfigInfo.ExternalizedCheckpointInfo(
				retentionPolicy != CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
				retentionPolicy != CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION);

		String stateBackendName = executionGraph.getStateBackendName().orElse(null);

		return new CheckpointConfigInfo(
			checkpointCoordinatorConfiguration.isExactlyOnce() ? CheckpointConfigInfo.ProcessingMode.EXACTLY_ONCE : CheckpointConfigInfo.ProcessingMode.AT_LEAST_ONCE,
			checkpointCoordinatorConfiguration.getCheckpointInterval(),
			checkpointCoordinatorConfiguration.getCheckpointTimeout(),
			checkpointCoordinatorConfiguration.getMinPauseBetweenCheckpoints(),
			checkpointCoordinatorConfiguration.getMaxConcurrentCheckpoints(),
			externalizedCheckpointInfo,
			stateBackendName);
	}
}
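
Note on the two booleans passed to ExternalizedCheckpointInfo above: externalized checkpoints count as enabled unless the retention policy is NEVER_RETAIN_AFTER_TERMINATION, and they are deleted on cancellation unless the policy is RETAIN_ON_CANCELLATION. A minimal sketch of that mapping (a fragment; the variable names are illustrative):

	// Illustrative fragment mirroring the flag derivation in the example above.
	CheckpointRetentionPolicy policy = CheckpointRetentionPolicy.RETAIN_ON_FAILURE;
	boolean externalizedEnabled = policy != CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION; // true
	boolean deleteOnCancellation = policy != CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION;        // true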
 
Example #2
Source File: CheckpointConfigHandler.java    From flink with Apache License 2.0
private static CheckpointConfigInfo createCheckpointConfigInfo(AccessExecutionGraph executionGraph) throws RestHandlerException {
	final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration = executionGraph.getCheckpointCoordinatorConfiguration();

	if (checkpointCoordinatorConfiguration == null) {
		throw new RestHandlerException(
			"Checkpointing is not enabled for this job (" + executionGraph.getJobID() + ").",
			HttpResponseStatus.NOT_FOUND);
	} else {
		CheckpointRetentionPolicy retentionPolicy = checkpointCoordinatorConfiguration.getCheckpointRetentionPolicy();

		CheckpointConfigInfo.ExternalizedCheckpointInfo externalizedCheckpointInfo = new CheckpointConfigInfo.ExternalizedCheckpointInfo(
				retentionPolicy != CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
				retentionPolicy != CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION);

		return new CheckpointConfigInfo(
			checkpointCoordinatorConfiguration.isExactlyOnce() ? CheckpointConfigInfo.ProcessingMode.EXACTLY_ONCE : CheckpointConfigInfo.ProcessingMode.AT_LEAST_ONCE,
			checkpointCoordinatorConfiguration.getCheckpointInterval(),
			checkpointCoordinatorConfiguration.getCheckpointTimeout(),
			checkpointCoordinatorConfiguration.getMinPauseBetweenCheckpoints(),
			checkpointCoordinatorConfiguration.getMaxConcurrentCheckpoints(),
			externalizedCheckpointInfo);
	}
}
 
Example #3
Source File: AbstractCheckpointHandler.java    From flink with Apache License 2.0
@Override
protected R handleRequest(HandlerRequest<EmptyRequestBody, M> request, AccessExecutionGraph executionGraph) throws RestHandlerException {
	final long checkpointId = request.getPathParameter(CheckpointIdPathParameter.class);

	final CheckpointStatsSnapshot checkpointStatsSnapshot = executionGraph.getCheckpointStatsSnapshot();

	if (checkpointStatsSnapshot != null) {
		AbstractCheckpointStats checkpointStats = checkpointStatsSnapshot.getHistory().getCheckpointById(checkpointId);

		if (checkpointStats != null) {
			checkpointStatsCache.tryAdd(checkpointStats);
		} else {
			checkpointStats = checkpointStatsCache.tryGet(checkpointId);
		}

		if (checkpointStats != null) {
			return handleCheckpointRequest(request, checkpointStats);
		} else {
			throw new RestHandlerException("Could not find checkpointing statistics for checkpoint " + checkpointId + '.', HttpResponseStatus.NOT_FOUND);
		}
	} else {
		throw new RestHandlerException("Checkpointing was not enabled for job " + executionGraph.getJobID() + '.', HttpResponseStatus.NOT_FOUND);
	}
}
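
The lookup above tries the live checkpoint history first; on a hit it refreshes a side cache (tryAdd), and on a miss it falls back to that cache (tryGet), so statistics for checkpoints that have rotated out of the bounded history stay addressable for a while. A hypothetical stand-in with the same tryAdd/tryGet shape (this is not Flink's CheckpointStatsCache, just a sketch of the pattern):

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical bounded cache illustrating the tryAdd/tryGet pattern used above.
public class BoundedStatsCache<K, V> {
	private final int maxSize;
	private final Map<K, V> cache;

	public BoundedStatsCache(int maxSize) {
		this.maxSize = maxSize;
		// access-order LinkedHashMap evicts the least recently used entry
		this.cache = new LinkedHashMap<K, V>(16, 0.75f, true) {
			@Override
			protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
				return size() > BoundedStatsCache.this.maxSize;
			}
		};
	}

	public void tryAdd(K key, V value) {
		cache.put(key, value);
	}

	public V tryGet(K key) {
		return cache.get(key); // null on a miss, like the handler expects
	}
}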
 
Example #4
Source File: TaskCheckpointStatisticDetailsHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	CheckpointStatsSnapshot stats = graph.getCheckpointStatsSnapshot();
	if (stats == null) {
		return Collections.emptyList();
	}
	CheckpointStatsHistory history = stats.getHistory();
	List<ArchivedJson> archive = new ArrayList<>(history.getCheckpoints().size());
	for (AbstractCheckpointStats checkpoint : history.getCheckpoints()) {
		for (TaskStateStats subtaskStats : checkpoint.getAllTaskStateStats()) {
			ResponseBody json = createCheckpointDetails(checkpoint, subtaskStats);
			String path = getMessageHeaders().getTargetRestEndpointURL()
				.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString())
				.replace(':' + CheckpointIdPathParameter.KEY, String.valueOf(checkpoint.getCheckpointId()))
				.replace(':' + JobVertexIdPathParameter.KEY, subtaskStats.getJobVertexId().toString());
			archive.add(new ArchivedJson(path, json));
		}
	}
	return archive;
}
 
Example #5
Source File: AbstractExecutionGraphHandler.java    From Flink-CEPplus with Apache License 2.0
@Override
protected CompletableFuture<R> handleRequest(@Nonnull HandlerRequest<EmptyRequestBody, M> request, @Nonnull RestfulGateway gateway) throws RestHandlerException {
	JobID jobId = request.getPathParameter(JobIDPathParameter.class);

	CompletableFuture<AccessExecutionGraph> executionGraphFuture = executionGraphCache.getExecutionGraph(jobId, gateway);

	return executionGraphFuture.thenApplyAsync(
		executionGraph -> {
			try {
				return handleRequest(request, executionGraph);
			} catch (RestHandlerException rhe) {
				throw new CompletionException(rhe);
			}
		}, executor)
		.exceptionally(throwable -> {
			throwable = ExceptionUtils.stripCompletionException(throwable);
			if (throwable instanceof FlinkJobNotFoundException) {
				throw new CompletionException(
					new NotFoundException(String.format("Job %s not found", jobId), throwable));
			} else {
				throw new CompletionException(throwable);
			}
		});
}
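
Two idioms here are easy to miss: a checked RestHandlerException cannot escape the lambda given to thenApplyAsync, so it is wrapped in a CompletionException, and the exceptionally stage unwraps the failure (via Flink's ExceptionUtils.stripCompletionException) before translating it. A self-contained, JDK-only sketch of the same wrap/unwrap round trip (names are illustrative; a single getCause() stands in for stripCompletionException):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class WrapUnwrapDemo {
	public static void main(String[] args) {
		CompletableFuture<String> result = CompletableFuture.supplyAsync(() -> "job-42")
			.thenApply(id -> {
				try {
					return lookup(id);
				} catch (Exception e) {
					// checked exceptions must be smuggled out as CompletionException
					throw new CompletionException(e);
				}
			})
			.exceptionally(t -> {
				// unwrap the CompletionException layer before inspecting the cause
				Throwable cause = t instanceof CompletionException ? t.getCause() : t;
				return "fallback after: " + cause.getMessage();
			});

		System.out.println(result.join()); // fallback after: job job-42 not found
	}

	private static String lookup(String id) throws Exception {
		throw new Exception("job " + id + " not found");
	}
}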
 
Example #6
Source File: DefaultExecutionGraphCacheTest.java    From flink with Apache License 2.0
/**
 * Tests that an AccessExecutionGraph is invalidated after its TTL expired.
 */
@Test
public void testExecutionGraphEntryInvalidation() throws Exception {
	final Time timeout = Time.milliseconds(100L);
	final Time timeToLive = Time.milliseconds(1L);

	final CountingRestfulGateway restfulGateway = createCountingRestfulGateway(
		expectedJobId,
		CompletableFuture.completedFuture(expectedExecutionGraph),
		CompletableFuture.completedFuture(expectedExecutionGraph));

	try (ExecutionGraphCache executionGraphCache = new DefaultExecutionGraphCache(timeout, timeToLive)) {
		CompletableFuture<AccessExecutionGraph> executionGraphFuture = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, executionGraphFuture.get());

		// sleep for the TTL
		Thread.sleep(timeToLive.toMilliseconds() * 5L);

		CompletableFuture<AccessExecutionGraph> executionGraphFuture2 = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, executionGraphFuture2.get());

		assertThat(restfulGateway.getNumRequestJobCalls(), Matchers.equalTo(2));
	}
}
 
Example #7
Source File: JobConfigHandler.java    From flink with Apache License 2.0
private static JobConfigInfo createJobConfigInfo(AccessExecutionGraph executionGraph) {
	final ArchivedExecutionConfig executionConfig = executionGraph.getArchivedExecutionConfig();
	final JobConfigInfo.ExecutionConfigInfo executionConfigInfo;

	if (executionConfig != null) {
		executionConfigInfo = new JobConfigInfo.ExecutionConfigInfo(
			executionConfig.getExecutionMode(),
			executionConfig.getRestartStrategyDescription(),
			executionConfig.getParallelism(),
			executionConfig.getObjectReuseEnabled(),
			executionConfig.getGlobalJobParameters());
	} else {
		executionConfigInfo = null;
	}

	return new JobConfigInfo(executionGraph.getJobID(), executionGraph.getJobName(), executionConfigInfo);
}
 
Example #8
Source File: DefaultExecutionGraphCacheTest.java    From flink with Apache License 2.0
/**
 * Tests that we can cache AccessExecutionGraphs over multiple accesses.
 */
@Test
public void testExecutionGraphCaching() throws Exception {
	final Time timeout = Time.milliseconds(100L);
	final Time timeToLive = Time.hours(1L);

	final CountingRestfulGateway restfulGateway = createCountingRestfulGateway(expectedJobId, CompletableFuture.completedFuture(expectedExecutionGraph));

	try (ExecutionGraphCache executionGraphCache = new DefaultExecutionGraphCache(timeout, timeToLive)) {
		CompletableFuture<AccessExecutionGraph> accessExecutionGraphFuture = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, accessExecutionGraphFuture.get());

		accessExecutionGraphFuture = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, accessExecutionGraphFuture.get());

		assertThat(restfulGateway.getNumRequestJobCalls(), Matchers.equalTo(1));
	}
}
 
Example #9
Source File: JobConfigHandler.java    From Flink-CEPplus with Apache License 2.0
private static JobConfigInfo createJobConfigInfo(AccessExecutionGraph executionGraph) {
	final ArchivedExecutionConfig executionConfig = executionGraph.getArchivedExecutionConfig();
	final JobConfigInfo.ExecutionConfigInfo executionConfigInfo;

	if (executionConfig != null) {
		executionConfigInfo = new JobConfigInfo.ExecutionConfigInfo(
			executionConfig.getExecutionMode(),
			executionConfig.getRestartStrategyDescription(),
			executionConfig.getParallelism(),
			executionConfig.getObjectReuseEnabled(),
			executionConfig.getGlobalJobParameters());
	} else {
		executionConfigInfo = null;
	}

	return new JobConfigInfo(executionGraph.getJobID(), executionGraph.getJobName(), executionConfigInfo);
}
 
Example #10
Source File: JobAccumulatorsHandler.java    From flink with Apache License 2.0
@Override
protected JobAccumulatorsInfo handleRequest(HandlerRequest<EmptyRequestBody, JobAccumulatorsMessageParameters> request, AccessExecutionGraph graph) throws RestHandlerException {
	List<Boolean> queryParams = request.getQueryParameter(AccumulatorsIncludeSerializedValueQueryParameter.class);

	final boolean includeSerializedValue;
	if (!queryParams.isEmpty()) {
		includeSerializedValue = queryParams.get(0);
	} else {
		includeSerializedValue = false;
	}

	return createJobAccumulatorsInfo(graph, includeSerializedValue);
}
 
Example #11
Source File: JobAccumulatorsHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = createJobAccumulatorsInfo(graph, true);
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singleton(new ArchivedJson(path, json));
}
 
Example #12
Source File: WebMonitorUtils.java    From flink with Apache License 2.0
public static JobDetails createDetailsForJob(AccessExecutionGraph job) {
	JobStatus status = job.getState();

	long started = job.getStatusTimestamp(JobStatus.CREATED);
	long finished = status.isGloballyTerminalState() ? job.getStatusTimestamp(status) : -1L;
	long duration = (finished >= 0L ? finished : System.currentTimeMillis()) - started;

	int[] countsPerStatus = new int[ExecutionState.values().length];
	long lastChanged = 0;
	int numTotalTasks = 0;

	for (AccessExecutionJobVertex ejv : job.getVerticesTopologically()) {
		AccessExecutionVertex[] vertices = ejv.getTaskVertices();
		numTotalTasks += vertices.length;

		for (AccessExecutionVertex vertex : vertices) {
			ExecutionState state = vertex.getExecutionState();
			countsPerStatus[state.ordinal()]++;
			lastChanged = Math.max(lastChanged, vertex.getStateTimestamp(state));
		}
	}

	lastChanged = Math.max(lastChanged, finished);

	return new JobDetails(
		job.getJobID(),
		job.getJobName(),
		started,
		finished,
		duration,
		status,
		lastChanged,
		countsPerStatus,
		numTotalTasks);
}
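
The per-status counts above use a plain int array indexed by ExecutionState.ordinal(), which maps one-to-one onto the enum's declaration order. A JDK-only sketch of that counting trick, using an illustrative stand-in enum:

import java.util.Arrays;

public class OrdinalCountDemo {
	enum State { CREATED, RUNNING, FINISHED } // illustrative stand-in for ExecutionState

	public static void main(String[] args) {
		State[] tasks = {State.RUNNING, State.FINISHED, State.RUNNING};

		int[] countsPerStatus = new int[State.values().length];
		for (State s : tasks) {
			countsPerStatus[s.ordinal()]++; // array index matches declaration order
		}

		System.out.println(Arrays.toString(countsPerStatus)); // [0, 2, 1]
	}
}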
 
Example #13
Source File: JobVertexDetailsHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	Collection<? extends AccessExecutionJobVertex> vertices = graph.getAllVertices().values();
	List<ArchivedJson> archive = new ArrayList<>(vertices.size());
	for (AccessExecutionJobVertex task : vertices) {
		ResponseBody json = createJobVertexDetailsInfo(task, graph.getJobID(), null);
		String path = getMessageHeaders().getTargetRestEndpointURL()
			.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString())
			.replace(':' + JobVertexIdPathParameter.KEY, task.getJobVertexId().toString());
		archive.add(new ArchivedJson(path, json));
	}
	return archive;
}
 
Example #14
Source File: JobsOverviewHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = new MultipleJobsDetails(Collections.singleton(WebMonitorUtils.createDetailsForJob(graph)));
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singletonList(new ArchivedJson(path, json));
}
 
Example #15
Source File: SubtasksTimesHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	Collection<? extends AccessExecutionJobVertex> allVertices = graph.getAllVertices().values();
	List<ArchivedJson> archive = new ArrayList<>(allVertices.size());
	for (AccessExecutionJobVertex task : allVertices) {
		ResponseBody json = createSubtaskTimesInfo(task);
		String path = getMessageHeaders().getTargetRestEndpointURL()
			.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString())
			.replace(':' + JobVertexIdPathParameter.KEY, task.getJobVertexId().toString());
		archive.add(new ArchivedJson(path, json));
	}
	return archive;
}
 
Example #16
Source File: JobPlanHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = createJobPlanInfo(graph);
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singleton(new ArchivedJson(path, json));
}
 
Example #17
Source File: MiniClusterClient.java    From flink with Apache License 2.0
@Override
public Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID, ClassLoader loader) throws Exception {
	AccessExecutionGraph executionGraph = miniCluster.getExecutionGraph(jobID).get();
	Map<String, SerializedValue<OptionalFailure<Object>>> accumulatorsSerialized = executionGraph.getAccumulatorsSerialized();
	Map<String, OptionalFailure<Object>> result = new HashMap<>(accumulatorsSerialized.size());
	for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> acc : accumulatorsSerialized.entrySet()) {
		result.put(acc.getKey(), acc.getValue().deserializeValue(loader));
	}
	return result;
}
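
Accumulators arrive from the execution graph in serialized form and are deserialized on the client with the caller's ClassLoader, since accumulator types may be defined in user code. A minimal round trip with Flink's SerializedValue, as a fragment (the payload is illustrative; both calls throw checked exceptions):

	// Illustrative round trip; user-defined accumulator types would need the user-code ClassLoader.
	SerializedValue<String> serialized = new SerializedValue<>("accumulator payload"); // throws IOException
	String restored = serialized.deserializeValue(Thread.currentThread().getContextClassLoader()); // throws IOException, ClassNotFoundException
	assert "accumulator payload".equals(restored);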
 
Example #18
Source File: ArchivedJobGenerationUtils.java    From Flink-CEPplus with Apache License 2.0
public static AccessExecutionGraph getTestJob() throws Exception {
	synchronized (lock) {
		if (originalJob == null) {
			generateArchivedJob();
		}
	}
	return originalJob;
}
 
Example #19
Source File: ExecutionGraphCacheTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that concurrent accesses only trigger a single AccessExecutionGraph request.
 */
@Test
public void testConcurrentAccess() throws Exception {
	final Time timeout = Time.milliseconds(100L);
	final Time timeToLive = Time.hours(1L);

	final CountingRestfulGateway restfulGateway = createCountingRestfulGateway(expectedJobId, CompletableFuture.completedFuture(expectedExecutionGraph));

	final int numConcurrentAccesses = 10;

	final ArrayList<CompletableFuture<AccessExecutionGraph>> executionGraphFutures = new ArrayList<>(numConcurrentAccesses);

	final ExecutorService executor = java.util.concurrent.Executors.newFixedThreadPool(numConcurrentAccesses);

	try (ExecutionGraphCache executionGraphCache = new ExecutionGraphCache(timeout, timeToLive)) {
		for (int i = 0; i < numConcurrentAccesses; i++) {
			CompletableFuture<AccessExecutionGraph> executionGraphFuture = CompletableFuture
				.supplyAsync(
					() -> executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway),
					executor)
				.thenCompose(Function.identity());

			executionGraphFutures.add(executionGraphFuture);
		}

		final CompletableFuture<Collection<AccessExecutionGraph>> allExecutionGraphFutures = FutureUtils.combineAll(executionGraphFutures);

		Collection<AccessExecutionGraph> allExecutionGraphs = allExecutionGraphFutures.get();

		for (AccessExecutionGraph executionGraph : allExecutionGraphs) {
			assertEquals(expectedExecutionGraph, executionGraph);
		}

		assertThat(restfulGateway.getNumRequestJobCalls(), Matchers.equalTo(1));
	} finally {
		ExecutorUtils.gracefulShutdown(5000L, TimeUnit.MILLISECONDS, executor);
	}
}
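
Because the supplier passed to supplyAsync itself returns a future, the raw result is a CompletableFuture<CompletableFuture<AccessExecutionGraph>>; composing with Function.identity() flattens it back to a single future. A JDK-only sketch of the flattening idiom:

import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public class FlattenDemo {
	public static void main(String[] args) {
		// supplyAsync alone would yield CompletableFuture<CompletableFuture<String>>
		CompletableFuture<String> flat = CompletableFuture
			.supplyAsync(() -> CompletableFuture.completedFuture("graph"))
			.thenCompose(Function.identity()); // flattens the nested future

		System.out.println(flat.join()); // graph
	}
}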
 
Example #20
Source File: TaskExecutorITCase.java    From flink with Apache License 2.0
private SupplierWithException<Boolean, Exception> jobIsRunning(Supplier<CompletableFuture<? extends AccessExecutionGraph>> executionGraphFutureSupplier) {
	final Predicate<AccessExecution> runningOrFinished = ExecutionGraphTestUtils.isInExecutionState(ExecutionState.RUNNING).or(ExecutionGraphTestUtils.isInExecutionState(ExecutionState.FINISHED));
	final Predicate<AccessExecutionGraph> allExecutionsRunning = ExecutionGraphTestUtils.allExecutionsPredicate(runningOrFinished);

	return () -> {
		final AccessExecutionGraph executionGraph = executionGraphFutureSupplier.get().join();
		return allExecutionsRunning.test(executionGraph);
	};
}
 
Example #21
Source File: JobConfigHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = createJobConfigInfo(graph);
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singleton(new ArchivedJson(path, json));
}
 
Example #22
Source File: JobVertexDetailsHandler.java    From flink with Apache License 2.0
@Override
protected JobVertexDetailsInfo handleRequest(
		HandlerRequest<EmptyRequestBody, JobVertexMessageParameters> request,
		AccessExecutionGraph executionGraph) throws NotFoundException {
	JobID jobID = request.getPathParameter(JobIDPathParameter.class);
	JobVertexID jobVertexID = request.getPathParameter(JobVertexIdPathParameter.class);
	AccessExecutionJobVertex jobVertex = executionGraph.getJobVertex(jobVertexID);

	if (jobVertex == null) {
		throw new NotFoundException(String.format("JobVertex %s not found", jobVertexID));
	}

	return createJobVertexDetailsInfo(jobVertex, jobID, metricFetcher);
}
 
Example #23
Source File: JobDetailsHandler.java    From Flink-CEPplus with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = createJobDetailsInfo(graph, null);
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singleton(new ArchivedJson(path, json));
}
 
Example #24
Source File: SubtaskExecutionAttemptDetailsHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	List<ArchivedJson> archive = new ArrayList<>(16);
	for (AccessExecutionJobVertex task : graph.getAllVertices().values()) {
		for (AccessExecutionVertex subtask : task.getTaskVertices()) {
			ResponseBody curAttemptJson = SubtaskExecutionAttemptDetailsInfo.create(subtask.getCurrentExecutionAttempt(), null, graph.getJobID(), task.getJobVertexId());
			String curAttemptPath = getMessageHeaders().getTargetRestEndpointURL()
				.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString())
				.replace(':' + JobVertexIdPathParameter.KEY, task.getJobVertexId().toString())
				.replace(':' + SubtaskIndexPathParameter.KEY, String.valueOf(subtask.getParallelSubtaskIndex()))
				.replace(':' + SubtaskAttemptPathParameter.KEY, String.valueOf(subtask.getCurrentExecutionAttempt().getAttemptNumber()));

			archive.add(new ArchivedJson(curAttemptPath, curAttemptJson));

			for (int x = 0; x < subtask.getCurrentExecutionAttempt().getAttemptNumber(); x++) {
				AccessExecution attempt = subtask.getPriorExecutionAttempt(x);
				if (attempt != null) {
					ResponseBody json = SubtaskExecutionAttemptDetailsInfo.create(attempt, null, graph.getJobID(), task.getJobVertexId());
					String path = getMessageHeaders().getTargetRestEndpointURL()
						.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString())
						.replace(':' + JobVertexIdPathParameter.KEY, task.getJobVertexId().toString())
						.replace(':' + SubtaskIndexPathParameter.KEY, String.valueOf(subtask.getParallelSubtaskIndex()))
						.replace(':' + SubtaskAttemptPathParameter.KEY, String.valueOf(attempt.getAttemptNumber()));
					archive.add(new ArchivedJson(path, json));
				}
			}
		}
	}
	return archive;
}
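
The archived paths above are built by substituting each :parameter placeholder in the endpoint's URL template via String.replace, one call per path parameter. A JDK-only sketch of that substitution (the template and keys are illustrative, not the real Flink endpoint):

public class PathTemplateDemo {
	public static void main(String[] args) {
		// Illustrative template; the real keys come from the *PathParameter classes.
		String template = "/jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex";

		String path = template
			.replace(":jobid", "ab12cd34")
			.replace(":vertexid", "ef56ab78")
			.replace(":subtaskindex", String.valueOf(3));

		System.out.println(path); // /jobs/ab12cd34/vertices/ef56ab78/subtasks/3
	}
}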
 
Example #25
Source File: JobExceptionsHandler.java    From flink with Apache License 2.0
private static JobExceptionsInfo createJobExceptionsInfo(AccessExecutionGraph executionGraph, int exceptionToReportMaxSize) {
	ErrorInfo rootException = executionGraph.getFailureInfo();
	String rootExceptionMessage = null;
	Long rootTimestamp = null;
	if (rootException != null) {
		rootExceptionMessage = rootException.getExceptionAsString();
		rootTimestamp = rootException.getTimestamp();
	}

	List<JobExceptionsInfo.ExecutionExceptionInfo> taskExceptionList = new ArrayList<>();
	boolean truncated = false;
	for (AccessExecutionVertex task : executionGraph.getAllExecutionVertices()) {
		String t = task.getFailureCauseAsString();
		if (t != null && !t.equals(ExceptionUtils.STRINGIFIED_NULL_EXCEPTION)) {
			if (taskExceptionList.size() >= exceptionToReportMaxSize) {
				truncated = true;
				break;
			}

			TaskManagerLocation location = task.getCurrentAssignedResourceLocation();
			String locationString = location != null ?
				location.getFQDNHostname() + ':' + location.dataPort() : "(unassigned)";
			long timestamp = task.getStateTimestamp(ExecutionState.FAILED);
			taskExceptionList.add(new JobExceptionsInfo.ExecutionExceptionInfo(
				t,
				task.getTaskNameWithSubtaskIndex(),
				locationString,
				timestamp == 0 ? -1 : timestamp));
		}
	}

	return new JobExceptionsInfo(rootExceptionMessage, rootTimestamp, taskExceptionList, truncated);
}
 
Example #26
Source File: JobDetailsHandler.java    From flink with Apache License 2.0
@Override
public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
	ResponseBody json = createJobDetailsInfo(graph, null);
	String path = getMessageHeaders().getTargetRestEndpointURL()
		.replace(':' + JobIDPathParameter.KEY, graph.getJobID().toString());
	return Collections.singleton(new ArchivedJson(path, json));
}
 
Example #27
Source File: JobConfigHandlerTest.java    From flink with Apache License 2.0
@Test
public void handleRequest_executionConfigWithSecretValues_excludesSecretValuesFromResponse() throws HandlerRequestException {
	final JobConfigHandler jobConfigHandler = new JobConfigHandler(
		() -> null,
		TestingUtils.TIMEOUT(),
		Collections.emptyMap(),
		JobConfigHeaders.getInstance(),
		new DefaultExecutionGraphCache(TestingUtils.TIMEOUT(), TestingUtils.TIMEOUT()),
		TestingUtils.defaultExecutor());

	final Map<String, String> globalJobParameters = new HashMap<>();
	globalJobParameters.put("foobar", "barfoo");
	globalJobParameters.put("bar.secret.foo", "my secret");
	globalJobParameters.put("password.to.my.safe", "12345");

	final ArchivedExecutionConfig archivedExecutionConfig = new ArchivedExecutionConfigBuilder()
		.setGlobalJobParameters(globalJobParameters)
		.build();
	final AccessExecutionGraph archivedExecutionGraph = new ArchivedExecutionGraphBuilder()
		.setArchivedExecutionConfig(archivedExecutionConfig)
		.build();
	final HandlerRequest<EmptyRequestBody, JobMessageParameters> handlerRequest = createRequest(archivedExecutionGraph.getJobID());

	final JobConfigInfo jobConfigInfoResponse = jobConfigHandler.handleRequest(handlerRequest, archivedExecutionGraph);

	final Map<String, String> filteredGlobalJobParameters = filterSecretValues(globalJobParameters);

	assertThat(jobConfigInfoResponse.getExecutionConfigInfo().getGlobalJobParameters(), is(equalTo(filteredGlobalJobParameters)));
}