Java Code Examples for org.apache.flink.runtime.execution.ExecutionState#values()

The following examples show how to use org.apache.flink.runtime.execution.ExecutionState#values(). They are drawn from open-source projects; the source file and originating project are listed above each example.
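
Before the examples, it helps to see the recurring idioms: ExecutionState.values() is used to size an ordinal-indexed counter array, to pre-populate a per-state map, and to pick a random state for test fixtures. The sketch below shows all three; it is a minimal illustration assuming only flink-runtime on the classpath, and the class name is invented for this page.

import java.util.EnumMap;
import java.util.Map;
import java.util.Random;

import org.apache.flink.runtime.execution.ExecutionState;

public class ExecutionStateValuesSketch {
	public static void main(String[] args) {
		// Idiom 1: counter array sized by values().length, indexed by ordinal().
		int[] countsPerState = new int[ExecutionState.values().length];
		countsPerState[ExecutionState.RUNNING.ordinal()]++;

		// Idiom 2: a per-state map; EnumMap is the idiomatic container here.
		Map<ExecutionState, Integer> statusCounts = new EnumMap<>(ExecutionState.class);
		for (ExecutionState state : ExecutionState.values()) {
			statusCounts.put(state, 0);
		}

		// Idiom 3: drawing a uniformly random state, as several tests below do.
		Random random = new Random();
		ExecutionState randomState =
			ExecutionState.values()[random.nextInt(ExecutionState.values().length)];
		System.out.println(randomState);
	}
}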
Example 1
Source File: JobVertexTaskManagersInfoTest.java    From flink with Apache License 2.0
@Override
protected JobVertexTaskManagersInfo getTestResponseInstance() throws Exception {
	final Random random = new Random();
	List<TaskManagersInfo> taskManagersInfoList = new ArrayList<>();

	final Map<ExecutionState, Integer> statusCounts = new HashMap<>(ExecutionState.values().length);
	final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean());
	int count = 100;
	for (ExecutionState executionState : ExecutionState.values()) {
		statusCounts.put(executionState, count++);
	}
	taskManagersInfoList.add(new TaskManagersInfo("host1", ExecutionState.CANCELING, 1L, 2L, 3L, jobVertexMetrics, statusCounts, "taskmanagerId"));

	return new JobVertexTaskManagersInfo(new JobVertexID(), "test", System.currentTimeMillis(), taskManagersInfoList);
}
 
Example 2
Source File: SubtaskExecutionAttemptDetailsInfoTest.java    From Flink-CEPplus with Apache License 2.0
@Override
protected SubtaskExecutionAttemptDetailsInfo getTestResponseInstance() throws Exception {
	final Random random = new Random();

	final IOMetricsInfo ioMetricsInfo = new IOMetricsInfo(
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean()
	);

	return new SubtaskExecutionAttemptDetailsInfo(
		Math.abs(random.nextInt()),
		ExecutionState.values()[random.nextInt(ExecutionState.values().length)],
		Math.abs(random.nextInt()),
		"localhost:" + random.nextInt(65536),
		Math.abs(random.nextLong()),
		Math.abs(random.nextLong()),
		Math.abs(random.nextLong()),
		ioMetricsInfo
	);
}
 
Example 3
Source File: WebMonitorMessagesTest.java    From flink with Apache License 2.0
private Collection<JobDetails> randomJobDetails(Random rnd) {
	final JobDetails[] details = new JobDetails[rnd.nextInt(10)];
	for (int k = 0; k < details.length; k++) {
		int[] numVerticesPerState = new int[ExecutionState.values().length];
		int numTotal = 0;

		for (int i = 0; i < numVerticesPerState.length; i++) {
			int count = rnd.nextInt(55);
			numVerticesPerState[i] = count;
			numTotal += count;
		}

		long time = rnd.nextLong();
		long endTime = rnd.nextBoolean() ? -1L : time + rnd.nextInt();
		long lastModified = endTime == -1 ? time + rnd.nextInt() : endTime;

		String name = new GenericMessageTester.StringInstantiator().instantiate(rnd);
		JobID jid = new JobID();
		JobStatus status = JobStatus.values()[rnd.nextInt(JobStatus.values().length)];

		details[k] = new JobDetails(jid, name, time, endTime, endTime - time, status, lastModified, numVerticesPerState, numTotal);
	}
	return Arrays.asList(details);
}
 
Example 4
Source File: JobVertexTaskManagersInfoTest.java    From flink with Apache License 2.0
@Override
protected JobVertexTaskManagersInfo getTestResponseInstance() throws Exception {
	final Random random = new Random();
	List<TaskManagersInfo> taskManagersInfoList = new ArrayList<>();

	final Map<ExecutionState, Integer> statusCounts = new HashMap<>(ExecutionState.values().length);
	final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean(),
		random.nextLong(),
		random.nextBoolean());
	int count = 100;
	for (ExecutionState executionState : ExecutionState.values()) {
		statusCounts.put(executionState, count++);
	}
	taskManagersInfoList.add(new TaskManagersInfo("host1", ExecutionState.CANCELING, 1L, 2L, 3L, jobVertexMetrics, statusCounts));

	return new JobVertexTaskManagersInfo(new JobVertexID(), "test", System.currentTimeMillis(), taskManagersInfoList);
}
 
Example 5
Source File: WebMonitorUtils.java    From Flink-CEPplus with Apache License 2.0
public static JobDetails createDetailsForJob(AccessExecutionGraph job) {
	JobStatus status = job.getState();

	long started = job.getStatusTimestamp(JobStatus.CREATED);
	long finished = status.isGloballyTerminalState() ? job.getStatusTimestamp(status) : -1L;
	long duration = (finished >= 0L ? finished : System.currentTimeMillis()) - started;

	int[] countsPerStatus = new int[ExecutionState.values().length];
	long lastChanged = 0;
	int numTotalTasks = 0;

	for (AccessExecutionJobVertex ejv : job.getVerticesTopologically()) {
		AccessExecutionVertex[] vertices = ejv.getTaskVertices();
		numTotalTasks += vertices.length;

		for (AccessExecutionVertex vertex : vertices) {
			ExecutionState state = vertex.getExecutionState();
			countsPerStatus[state.ordinal()]++;
			lastChanged = Math.max(lastChanged, vertex.getStateTimestamp(state));
		}
	}

	lastChanged = Math.max(lastChanged, finished);

	return new JobDetails(
		job.getJobID(),
		job.getJobName(),
		started,
		finished,
		duration,
		status,
		lastChanged,
		countsPerStatus,
		numTotalTasks);
}
 
Example 6
Source File: JobDetailsInfoTest.java    From Flink-CEPplus with Apache License 2.0
@Override
protected JobDetailsInfo getTestResponseInstance() throws Exception {
	final Random random = new Random();
	final int numJobVertexDetailsInfos = 4;
	final String jsonPlan = "{\"id\":\"1234\"}";

	final Map<JobStatus, Long> timestamps = new HashMap<>(JobStatus.values().length);
	final Collection<JobDetailsInfo.JobVertexDetailsInfo> jobVertexInfos = new ArrayList<>(numJobVertexDetailsInfos);
	final Map<ExecutionState, Integer> jobVerticesPerState = new HashMap<>(ExecutionState.values().length);

	for (JobStatus jobStatus : JobStatus.values()) {
		timestamps.put(jobStatus, random.nextLong());
	}

	for (int i = 0; i < numJobVertexDetailsInfos; i++) {
		jobVertexInfos.add(createJobVertexDetailsInfo(random));
	}

	for (ExecutionState executionState : ExecutionState.values()) {
		jobVerticesPerState.put(executionState, random.nextInt());
	}

	return new JobDetailsInfo(
		new JobID(),
		"foobar",
		true,
		JobStatus.values()[random.nextInt(JobStatus.values().length)],
		1L,
		2L,
		1L,
		1984L,
		timestamps,
		jobVertexInfos,
		jobVerticesPerState,
		jsonPlan);
}
 
Example 7
Source File: ExecutionJobVertex.java    From flink with Apache License 2.0
@Override
public ExecutionState getAggregateState() {
	int[] num = new int[ExecutionState.values().length];
	for (ExecutionVertex vertex : this.taskVertices) {
		num[vertex.getExecutionState().ordinal()]++;
	}

	return getAggregateJobVertexState(num, parallelism);
}
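
Example 7 builds the per-state counts with the ordinal-indexed array idiom and then delegates to getAggregateJobVertexState. Flink's real aggregation rule lives in that static helper on ExecutionJobVertex; the standalone sketch below only illustrates the shape of such a rule and is not the actual logic.

import org.apache.flink.runtime.execution.ExecutionState;

public class AggregateStateSketch {

	// Illustrative only: the real rule is ExecutionJobVertex.getAggregateJobVertexState(int[], int).
	static ExecutionState aggregate(int[] countsPerState, int parallelism) {
		if (countsPerState[ExecutionState.FAILED.ordinal()] > 0) {
			return ExecutionState.FAILED;
		}
		if (countsPerState[ExecutionState.FINISHED.ordinal()] == parallelism) {
			return ExecutionState.FINISHED;
		}
		if (countsPerState[ExecutionState.RUNNING.ordinal()] > 0) {
			return ExecutionState.RUNNING;
		}
		return ExecutionState.CREATED;
	}
}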
 
Example 8
Source File: JobDetails.java    From flink with Apache License 2.0
@Override
public void serialize(
		JobDetails jobDetails,
		JsonGenerator jsonGenerator,
		SerializerProvider serializerProvider) throws IOException {
	jsonGenerator.writeStartObject();

	jsonGenerator.writeStringField(FIELD_NAME_JOB_ID, jobDetails.getJobId().toString());
	jsonGenerator.writeStringField(FIELD_NAME_JOB_NAME, jobDetails.getJobName());
	jsonGenerator.writeStringField(FIELD_NAME_STATUS, jobDetails.getStatus().name());

	jsonGenerator.writeNumberField(FIELD_NAME_START_TIME, jobDetails.getStartTime());
	jsonGenerator.writeNumberField(FIELD_NAME_END_TIME, jobDetails.getEndTime());
	jsonGenerator.writeNumberField(FIELD_NAME_DURATION, jobDetails.getDuration());
	jsonGenerator.writeNumberField(FIELD_NAME_LAST_MODIFICATION, jobDetails.getLastUpdateTime());

	jsonGenerator.writeObjectFieldStart("tasks");
	jsonGenerator.writeNumberField(FIELD_NAME_TOTAL_NUMBER_TASKS, jobDetails.getNumTasks());

	final int[] perState = jobDetails.getTasksPerState();

	for (ExecutionState executionState : ExecutionState.values()) {
		jsonGenerator.writeNumberField(executionState.name().toLowerCase(), perState[executionState.ordinal()]);
	}

	jsonGenerator.writeEndObject();

	jsonGenerator.writeEndObject();
}
 
Example 9
Source File: JobDetails.java    From flink with Apache License 2.0
@Override
public JobDetails deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException {

	JsonNode rootNode = jsonParser.readValueAsTree();

	JobID jobId = JobID.fromHexString(rootNode.get(FIELD_NAME_JOB_ID).textValue());
	String jobName = rootNode.get(FIELD_NAME_JOB_NAME).textValue();
	long startTime = rootNode.get(FIELD_NAME_START_TIME).longValue();
	long endTime = rootNode.get(FIELD_NAME_END_TIME).longValue();
	long duration = rootNode.get(FIELD_NAME_DURATION).longValue();
	JobStatus jobStatus = JobStatus.valueOf(rootNode.get(FIELD_NAME_STATUS).textValue());
	long lastUpdateTime = rootNode.get(FIELD_NAME_LAST_MODIFICATION).longValue();

	JsonNode tasksNode = rootNode.get("tasks");
	int numTasks = tasksNode.get(FIELD_NAME_TOTAL_NUMBER_TASKS).intValue();

	int[] numVerticesPerExecutionState = new int[ExecutionState.values().length];

	for (ExecutionState executionState : ExecutionState.values()) {
		numVerticesPerExecutionState[executionState.ordinal()] = tasksNode.get(executionState.name().toLowerCase()).intValue();
	}

	return new JobDetails(
		jobId,
		jobName,
		startTime,
		endTime,
		duration,
		jobStatus,
		lastUpdateTime,
		numVerticesPerExecutionState,
		numTasks);
}
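
Examples 8 and 9 are a matched Jackson serializer/deserializer pair: the lowercased enum names produced by executionState.name().toLowerCase() become the JSON field names that the deserializer reads back. A hedged round-trip sketch follows; the import paths and the assumption that JobDetails wires up these (de)serializers (for example via @JsonSerialize/@JsonDeserialize) depend on the Flink version and are not confirmed by the snippets above.

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.jobgraph.JobStatus;
import org.apache.flink.runtime.messages.webmonitor.JobDetails;

import com.fasterxml.jackson.databind.ObjectMapper;

public class JobDetailsRoundTripSketch {
	public static void main(String[] args) throws Exception {
		ObjectMapper mapper = new ObjectMapper();

		JobDetails original = new JobDetails(
			new JobID(), "wordcount", 1L, 2L, 1L,
			JobStatus.FINISHED, 2L,
			new int[ExecutionState.values().length], 0);

		// Relies on JobDetails registering the custom (de)serializers shown above.
		String json = mapper.writeValueAsString(original);
		JobDetails restored = mapper.readValue(json, JobDetails.class);

		System.out.println(original.equals(restored));
	}
}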
 
Example 10
Source File: Execution.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new Execution attempt.
 *
 * @param executor
 *             The executor used to dispatch callbacks from futures and asynchronous RPC calls.
 * @param vertex
 *             The execution vertex to which this Execution belongs
 * @param attemptNumber
 *             The execution attempt number.
 * @param globalModVersion
 *             The global modification version of the execution graph when this execution was created
 * @param startTimestamp
 *             The timestamp that marks the creation of this Execution
 * @param rpcTimeout
 *             The rpcTimeout for RPC calls like deploy/cancel/stop.
 */
public Execution(
		Executor executor,
		ExecutionVertex vertex,
		int attemptNumber,
		long globalModVersion,
		long startTimestamp,
		Time rpcTimeout) {

	this.executor = checkNotNull(executor);
	this.vertex = checkNotNull(vertex);
	this.attemptId = new ExecutionAttemptID();
	this.rpcTimeout = checkNotNull(rpcTimeout);

	this.globalModVersion = globalModVersion;
	this.attemptNumber = attemptNumber;

	this.stateTimestamps = new long[ExecutionState.values().length];
	markTimestamp(CREATED, startTimestamp);

	this.partialInputChannelDeploymentDescriptors = new ConcurrentLinkedQueue<>();
	this.terminalStateFuture = new CompletableFuture<>();
	this.releaseFuture = new CompletableFuture<>();
	this.taskManagerLocationFuture = new CompletableFuture<>();

	this.assignedResource = null;
}
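
The stateTimestamps array in the constructor above is the same values().length/ordinal() idiom applied to timestamps: one slot per state, stamped on each transition. A minimal standalone sketch of that bookkeeping (field and method names mirror the snippet, but the class itself is invented for illustration):

import org.apache.flink.runtime.execution.ExecutionState;

public class StateTimestampsSketch {

	// One timestamp slot per ExecutionState constant.
	private final long[] stateTimestamps = new long[ExecutionState.values().length];

	void markTimestamp(ExecutionState state, long timestamp) {
		stateTimestamps[state.ordinal()] = timestamp;
	}

	long getStateTimestamp(ExecutionState state) {
		return stateTimestamps[state.ordinal()];
	}
}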
 
Example 11
Source File: SubtaskExecutionAttemptDetailsInfoTest.java    From flink with Apache License 2.0
@Override
protected SubtaskExecutionAttemptDetailsInfo getTestResponseInstance() throws Exception {
	final Random random = new Random();

	final IOMetricsInfo ioMetricsInfo = new IOMetricsInfo(
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean(),
		Math.abs(random.nextLong()),
		random.nextBoolean()
	);

	return new SubtaskExecutionAttemptDetailsInfo(
		Math.abs(random.nextInt()),
		ExecutionState.values()[random.nextInt(ExecutionState.values().length)],
		Math.abs(random.nextInt()),
		"localhost:" + random.nextInt(65536),
		Math.abs(random.nextLong()),
		Math.abs(random.nextLong()),
		Math.abs(random.nextLong()),
		ioMetricsInfo,
		"taskmanagerId"
	);
}
 
Example 12
Source File: JobDetails.java    From Flink-CEPplus with Apache License 2.0
@Override
public void serialize(
		JobDetails jobDetails,
		JsonGenerator jsonGenerator,
		SerializerProvider serializerProvider) throws IOException {
	jsonGenerator.writeStartObject();

	jsonGenerator.writeStringField(FIELD_NAME_JOB_ID, jobDetails.getJobId().toString());
	jsonGenerator.writeStringField(FIELD_NAME_JOB_NAME, jobDetails.getJobName());
	jsonGenerator.writeStringField(FIELD_NAME_STATUS, jobDetails.getStatus().name());

	jsonGenerator.writeNumberField(FIELD_NAME_START_TIME, jobDetails.getStartTime());
	jsonGenerator.writeNumberField(FIELD_NAME_END_TIME, jobDetails.getEndTime());
	jsonGenerator.writeNumberField(FIELD_NAME_DURATION, jobDetails.getDuration());
	jsonGenerator.writeNumberField(FIELD_NAME_LAST_MODIFICATION, jobDetails.getLastUpdateTime());

	jsonGenerator.writeObjectFieldStart("tasks");
	jsonGenerator.writeNumberField(FIELD_NAME_TOTAL_NUMBER_TASKS, jobDetails.getNumTasks());

	final int[] perState = jobDetails.getTasksPerState();

	for (ExecutionState executionState : ExecutionState.values()) {
		jsonGenerator.writeNumberField(executionState.name().toLowerCase(), perState[executionState.ordinal()]);
	}

	jsonGenerator.writeEndObject();

	jsonGenerator.writeEndObject();
}
 
Example 13
Source File: HistoryServerArchiveFetcher.java    From flink with Apache License 2.0
private static String convertLegacyJobOverview(String legacyOverview) throws IOException {
	JsonNode root = mapper.readTree(legacyOverview);
	JsonNode finishedJobs = root.get("finished");
	JsonNode job = finishedJobs.get(0);

	JobID jobId = JobID.fromHexString(job.get("jid").asText());
	String name = job.get("name").asText();
	JobStatus state = JobStatus.valueOf(job.get("state").asText());

	long startTime = job.get("start-time").asLong();
	long endTime = job.get("end-time").asLong();
	long duration = job.get("duration").asLong();
	long lastMod = job.get("last-modification").asLong();

	JsonNode tasks = job.get("tasks");
	int numTasks = tasks.get("total").asInt();
	JsonNode pendingNode = tasks.get("pending");
	// For Flink versions < 1.4 the archive has a "pending" field;
	// since 1.4 it has been split into created, scheduled, and deploying.
	boolean versionLessThan14 = pendingNode != null;
	int created = 0;
	int scheduled;
	int deploying = 0;

	if (versionLessThan14) {
		// pending is a mix of CREATED/SCHEDULED/DEPLOYING
		// to maintain the correct number of task states we pick SCHEDULED
		scheduled = pendingNode.asInt();
	} else {
		created = tasks.get("created").asInt();
		scheduled = tasks.get("scheduled").asInt();
		deploying = tasks.get("deploying").asInt();
	}
	int running = tasks.get("running").asInt();
	int finished = tasks.get("finished").asInt();
	int canceling = tasks.get("canceling").asInt();
	int canceled = tasks.get("canceled").asInt();
	int failed = tasks.get("failed").asInt();

	int[] tasksPerState = new int[ExecutionState.values().length];
	tasksPerState[ExecutionState.CREATED.ordinal()] = created;
	tasksPerState[ExecutionState.SCHEDULED.ordinal()] = scheduled;
	tasksPerState[ExecutionState.DEPLOYING.ordinal()] = deploying;
	tasksPerState[ExecutionState.RUNNING.ordinal()] = running;
	tasksPerState[ExecutionState.FINISHED.ordinal()] = finished;
	tasksPerState[ExecutionState.CANCELING.ordinal()] = canceling;
	tasksPerState[ExecutionState.CANCELED.ordinal()] = canceled;
	tasksPerState[ExecutionState.FAILED.ordinal()] = failed;

	JobDetails jobDetails = new JobDetails(jobId, name, startTime, endTime, duration, state, lastMod, tasksPerState, numTasks);
	MultipleJobsDetails multipleJobsDetails = new MultipleJobsDetails(Collections.singleton(jobDetails));

	StringWriter sw = new StringWriter();
	mapper.writeValue(sw, multipleJobsDetails);
	return sw.toString();
}
 
Example 14
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From flink with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new ExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Build the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the execution under test.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example 15
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From flink with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new DefaultExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Build the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the execution under test.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example 16
Source File: HistoryServerArchiveFetcher.java    From Flink-CEPplus with Apache License 2.0
private static String convertLegacyJobOverview(String legacyOverview) throws IOException {
	JsonNode root = mapper.readTree(legacyOverview);
	JsonNode finishedJobs = root.get("finished");
	JsonNode job = finishedJobs.get(0);

	JobID jobId = JobID.fromHexString(job.get("jid").asText());
	String name = job.get("name").asText();
	JobStatus state = JobStatus.valueOf(job.get("state").asText());

	long startTime = job.get("start-time").asLong();
	long endTime = job.get("end-time").asLong();
	long duration = job.get("duration").asLong();
	long lastMod = job.get("last-modification").asLong();

	JsonNode tasks = job.get("tasks");
	int numTasks = tasks.get("total").asInt();
	JsonNode pendingNode = tasks.get("pending");
	// For Flink versions < 1.4 the archive has a "pending" field;
	// since 1.4 it has been split into created, scheduled, and deploying.
	boolean versionLessThan14 = pendingNode != null;
	int created = 0;
	int scheduled;
	int deploying = 0;

	if (versionLessThan14) {
		// pending is a mix of CREATED/SCHEDULED/DEPLOYING
		// to maintain the correct number of task states we pick SCHEDULED
		scheduled = pendingNode.asInt();
	} else {
		created = tasks.get("created").asInt();
		scheduled = tasks.get("scheduled").asInt();
		deploying = tasks.get("deploying").asInt();
	}
	int running = tasks.get("running").asInt();
	int finished = tasks.get("finished").asInt();
	int canceling = tasks.get("canceling").asInt();
	int canceled = tasks.get("canceled").asInt();
	int failed = tasks.get("failed").asInt();

	int[] tasksPerState = new int[ExecutionState.values().length];
	tasksPerState[ExecutionState.CREATED.ordinal()] = created;
	tasksPerState[ExecutionState.SCHEDULED.ordinal()] = scheduled;
	tasksPerState[ExecutionState.DEPLOYING.ordinal()] = deploying;
	tasksPerState[ExecutionState.RUNNING.ordinal()] = running;
	tasksPerState[ExecutionState.FINISHED.ordinal()] = finished;
	tasksPerState[ExecutionState.CANCELING.ordinal()] = canceling;
	tasksPerState[ExecutionState.CANCELED.ordinal()] = canceled;
	tasksPerState[ExecutionState.FAILED.ordinal()] = failed;

	JobDetails jobDetails = new JobDetails(jobId, name, startTime, endTime, duration, state, lastMod, tasksPerState, numTasks);
	MultipleJobsDetails multipleJobsDetails = new MultipleJobsDetails(Collections.singleton(jobDetails));

	StringWriter sw = new StringWriter();
	mapper.writeValue(sw, multipleJobsDetails);
	return sw.toString();
}
 
Example 17
Source File: ShuffleDescriptorTest.java    From flink with Apache License 2.0
/**
 * Tests the deployment descriptors for local, remote, and unknown partition
 * locations (with lazy deployment allowed and all execution states for the
 * producers).
 */
@Test
public void testMixedLocalRemoteUnknownDeployment() throws Exception {
	ResourceID consumerResourceID = ResourceID.generate();

	// Local and remote channel are only allowed for certain execution
	// states.
	for (ExecutionState state : ExecutionState.values()) {
		ResultPartitionID localPartitionId = new ResultPartitionID();
		ResultPartitionDeploymentDescriptor localPartition =
			createResultPartitionDeploymentDescriptor(localPartitionId, consumerResourceID);

		ResultPartitionID remotePartitionId = new ResultPartitionID();
		ResultPartitionDeploymentDescriptor remotePartition =
			createResultPartitionDeploymentDescriptor(remotePartitionId, ResourceID.generate());

		ResultPartitionID unknownPartitionId = new ResultPartitionID();

		ShuffleDescriptor localShuffleDescriptor =
			getConsumedPartitionShuffleDescriptor(localPartitionId, state, localPartition, true);
		ShuffleDescriptor remoteShuffleDescriptor =
			getConsumedPartitionShuffleDescriptor(remotePartitionId, state, remotePartition, true);
		ShuffleDescriptor unknownShuffleDescriptor =
			getConsumedPartitionShuffleDescriptor(unknownPartitionId, state, null, true);

		// These states are allowed
		if (state == ExecutionState.RUNNING ||
			state == ExecutionState.FINISHED ||
			state == ExecutionState.SCHEDULED ||
			state == ExecutionState.DEPLOYING) {
			NettyShuffleDescriptor nettyShuffleDescriptor;

			// Create local or remote channels
			verifyShuffleDescriptor(localShuffleDescriptor, NettyShuffleDescriptor.class, false, localPartitionId);
			nettyShuffleDescriptor = (NettyShuffleDescriptor) localShuffleDescriptor;
			assertThat(nettyShuffleDescriptor.isLocalTo(consumerResourceID), is(true));

			verifyShuffleDescriptor(remoteShuffleDescriptor, NettyShuffleDescriptor.class, false, remotePartitionId);
			nettyShuffleDescriptor = (NettyShuffleDescriptor) remoteShuffleDescriptor;
			assertThat(nettyShuffleDescriptor.isLocalTo(consumerResourceID), is(false));
			assertThat(nettyShuffleDescriptor.getConnectionId(), is(STUB_CONNECTION_ID));
		} else {
			// Unknown (lazy deployment allowed)
			verifyShuffleDescriptor(localShuffleDescriptor, UnknownShuffleDescriptor.class, true, localPartitionId);
			verifyShuffleDescriptor(remoteShuffleDescriptor, UnknownShuffleDescriptor.class, true, remotePartitionId);
		}

		verifyShuffleDescriptor(unknownShuffleDescriptor, UnknownShuffleDescriptor.class, true, unknownPartitionId);
	}
}
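
The chained state comparisons in the test above can also be expressed with an EnumSet, which makes the set of channel-eligible states explicit. This is a stylistic sketch, not how the Flink test is actually written:

import java.util.EnumSet;

import org.apache.flink.runtime.execution.ExecutionState;

public class ChannelEligibleStatesSketch {

	// States for which local/remote channels may be created in the test above.
	private static final EnumSet<ExecutionState> CHANNEL_ELIGIBLE = EnumSet.of(
		ExecutionState.SCHEDULED,
		ExecutionState.DEPLOYING,
		ExecutionState.RUNNING,
		ExecutionState.FINISHED);

	static boolean allowsKnownChannel(ExecutionState state) {
		return CHANNEL_ELIGIBLE.contains(state);
	}
}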
 
Example 18
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new ExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Build the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the execution under test.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example 19
Source File: JobDetailsHandler.java    From flink with Apache License 2.0
private static JobDetailsInfo createJobDetailsInfo(AccessExecutionGraph executionGraph, @Nullable MetricFetcher metricFetcher) {
	final long now = System.currentTimeMillis();
	final long startTime = executionGraph.getStatusTimestamp(JobStatus.CREATED);
	final long endTime = executionGraph.getState().isGloballyTerminalState() ?
		executionGraph.getStatusTimestamp(executionGraph.getState()) : -1L;
	final long duration = (endTime > 0L ? endTime : now) - startTime;

	final Map<JobStatus, Long> timestamps = new HashMap<>(JobStatus.values().length);

	for (JobStatus jobStatus : JobStatus.values()) {
		timestamps.put(jobStatus, executionGraph.getStatusTimestamp(jobStatus));
	}

	Collection<JobDetailsInfo.JobVertexDetailsInfo> jobVertexInfos = new ArrayList<>(executionGraph.getAllVertices().size());
	int[] jobVerticesPerState = new int[ExecutionState.values().length];

	for (AccessExecutionJobVertex accessExecutionJobVertex : executionGraph.getVerticesTopologically()) {
		final JobDetailsInfo.JobVertexDetailsInfo vertexDetailsInfo = createJobVertexDetailsInfo(
			accessExecutionJobVertex,
			now,
			executionGraph.getJobID(),
			metricFetcher);

		jobVertexInfos.add(vertexDetailsInfo);
		jobVerticesPerState[vertexDetailsInfo.getExecutionState().ordinal()]++;
	}

	Map<ExecutionState, Integer> jobVerticesPerStateMap = new HashMap<>(ExecutionState.values().length);

	for (ExecutionState executionState : ExecutionState.values()) {
		jobVerticesPerStateMap.put(executionState, jobVerticesPerState[executionState.ordinal()]);
	}

	return new JobDetailsInfo(
		executionGraph.getJobID(),
		executionGraph.getJobName(),
		executionGraph.isStoppable(),
		executionGraph.getState(),
		startTime,
		endTime,
		duration,
		now,
		timestamps,
		jobVertexInfos,
		jobVerticesPerStateMap,
		executionGraph.getJsonPlan());
}